
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * The details of a contiguous run of blocks that can be
 * represented by a single extent.
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};
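
/*
 * Illustrative example (not from the original source): if the walk finds
 * logical blocks 5..8 stored in physical blocks 1000..1003, the tracker
 * ends up as
 *
 *	lb.first_block = 5;	lb.last_block = 8;
 *	lb.first_pblock = 1000;	lb.last_pblock = 1003;
 *
 * and finish_range() below collapses it into the single extent
 * {ee_block = 5, ee_len = 4, pblock = 1000}.
 */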

static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;

	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on the temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0);
	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed for inserting this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credits, but below we try not to accumulate too many of
	 * them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credits we accumulated are not really high.
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		up_write((&EXT4_I(inode)->i_data_sem));
		retval = ext4_journal_restart(handle, needed);
		down_write((&EXT4_I(inode)->i_data_sem));
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If we are not able to extend the journal, restart it.
			 */
			up_write((&EXT4_I(inode)->i_data_sem));
			retval = ext4_journal_restart(handle, needed);
			down_write((&EXT4_I(inode)->i_data_sem));
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
err_out:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	lb->first_pblock = 0;
	return retval;
}

static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}
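
/*
 * Sketch of the coalescing above, with hypothetical values: three calls
 * with pblock 1000, 1001, 1002 while curr_block runs 0, 1, 2 grow one
 * range; a fourth call with pblock 2000 first flushes that range via
 * finish_range() and then restarts with first_block = 3 and
 * first_pblock = 2000.
 */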

static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}
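
/*
 * Worked numbers for the three walkers above, assuming a 4K block size:
 * max_entries = 4096 >> 2 = 1024, so a missing entry advances curr_block
 * by 1 in update_ind_extent_range(), by 1024 in update_dind_extent_range()
 * and by 1024 * 1024 in update_tind_extent_range().
 */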

static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing blocks. During this we touch the
	 * superblock, group descriptor and block bitmap,
	 * so allocate a credit of 3. We may also update
	 * quota (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}
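
/*
 * For example (an assumption about the common case, not stated in the
 * original): with quota disabled, EXT4_MAXQUOTAS_TRANS_BLOCKS() should
 * evaluate to 0, so a restart asks for just the 3 credits covering the
 * superblock, group descriptor and block bitmap updates.
 */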

static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32 i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE is cleared, a block allocation
	 * happened after we started the migration. We need to
	 * fail the migration.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode's i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty afterwards, because we decrement
	 * i_blocks when freeing the indirect meta-data blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;

	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	/*
	 * Worst case we can touch the allocation bitmaps, a bgd
	 * block, and a block to link in the orphan list. We do need
	 * to worry about credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		return retval;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				   S_IFREG, NULL, goal, owner);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		return retval;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop the inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later as and
	 * when we add extents we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail the migration. New block allocation will
	 * clear the EXT4_STATE_EXT_MIGRATE flag. The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just roll back the in-core changes and
		 * leave the other work to orphan_list_cleanup().
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * Failure case: delete the extent information with the
		 * tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * if we fail to swap inode data, free the extent
			 * details of the tmp inode
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * set the i_blocks count to zero
	 * so that the ext4_delete_inode does the
	 * right job
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);

	return retval;
}
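
/*
 * Usage sketch (not part of this file): user space reaches
 * ext4_ext_migrate() through the EXT4_IOC_MIGRATE ioctl, roughly:
 *
 *	int fd = open(path, O_RDWR);	(path is hypothetical)
 *	if (fd >= 0 && ioctl(fd, EXT4_IOC_MIGRATE) < 0)
 *		perror("EXT4_IOC_MIGRATE");
 */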

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header *eh;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent *ex;
	unsigned int i, len;
	ext4_fsblk_t blk;
	handle_t *handle;
	int ret;

	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
		return -EOPNOTSUPP;

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		if (len > EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = 0; i < len; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
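
/*
 * Usage sketch (hedged): this path is expected to be reached when the
 * extents flag is cleared on an extent-mapped inode via EXT4_IOC_SETFLAGS
 * (e.g. "chattr -e file"), whose handler migrates the inode when
 * EXT4_EXTENTS_FL is dropped.
 */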