/*
 * linux/fs/ext4/indirect.c
 *
 * from
 *
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@redhat.com), 1993, 1998
 */

#include "ext4_jbd2.h"
#include "truncate.h"
#include <linux/uio.h>

#include <trace/events/ext4.h>

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext4 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one. If @i_block is out of range
 * (negative or too large) a warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed. All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
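
/*
 * Worked example (illustrative, not part of the original source): with a
 * 4KB block size, EXT4_ADDR_PER_BLOCK == 1024 (ptrs_bits == 10) and
 * EXT4_NDIR_BLOCKS == 12. For i_block == 5000:
 *
 *	5000 - 12   = 4988	(past the direct blocks)
 *	4988 - 1024 = 3964	(past the single-indirect range)
 *	3964 < 1024 * 1024	(inside the double-indirect range)
 *
 * so the path has depth 3 and offsets[] = { EXT4_DIND_BLOCK,
 * 3964 >> 10 == 3, 3964 & 1023 == 892 }.
 */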
/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all way to the data (returns %NULL, *err == 0).
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}
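
/*
 * Illustrative example (not part of the original source): for a depth-3
 * path a fully resolved chain looks like
 *
 *	chain[0]: p -> &EXT4_I(inode)->i_data[EXT4_DIND_BLOCK], bh == NULL
 *	chain[1]: p -> slot inside the double-indirect block, bh == its bh
 *	chain[2]: p -> slot inside the indirect block, bh == its bh
 *
 * and chain[2].key holds the (little-endian) number of the data block.
 */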
/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if pointer will live in indirect block - allocate near that block.
 *   + if pointer will live in inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group. The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 * Because this is only used for non-extent files, we limit the block nr
 * to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 * ext4_blks_to_allocate - Look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly the blocks on that path have not been allocated
	 * either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
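
/*
 * Worked example (illustrative, not part of the original source): with
 * k == 1 the whole branch is missing, so for blks == 8 and
 * blocks_to_boundary == 5 the function returns 6 - it stops at the
 * indirect-block boundary rather than allocating across it. With k == 0
 * it instead scans the existing indirect block and counts how many
 * consecutive slots after branch[0].p are still zero.
 */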
/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @indirect_blks: number of allocated indirect blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * allocation (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle,
			     struct ext4_allocation_request *ar,
			     int indirect_blks, ext4_lblk_t *offsets,
			     Indirect *branch)
{
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
		} else
			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					ar->inode, ar->goal,
					ar->flags & EXT4_MB_DELALLOC_RESERVED,
					NULL, &err);
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar->len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
		if (err)
			goto failed;
	}
	return 0;
failed:
	for (; i >= 0; i--) {
		/*
		 * We want to ext4_forget() only freshly allocated indirect
		 * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and
		 * buffer at branch[0].bh is indirect block / inode already
		 * existing before ext4_alloc_branch() was called.
		 */
		if (i > 0 && i != indirect_blks && branch[i].bh)
			ext4_forget(handle, 1, ar->inode, branch[i].bh,
				    branch[i].bh->b_blocknr);
		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
				 (i == indirect_blks) ? ar->len : 1, 0);
	}
	return err;
}
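
/*
 * Illustrative note (not part of the original source): after a successful
 * call with indirect_blks == 2, branch[0].key holds the block number of
 * the new top indirect block, branch[1] and branch[2] describe the blocks
 * below it, and the ar->len data block numbers have been written into the
 * bottom block. Only *branch[0].p is still unset; the caller links it in
 * via ext4_splice_branch().
 */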
/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @ar: structure describing the allocation request
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle,
			      struct ext4_allocation_request *ar,
			      Indirect *where, int num)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks.
	 */
	if (num == 0 && ar->len > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < ar->len; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode. Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size. But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, ar->inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * where[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
			 ar->len, 0);

	return err;
}
/*
 * The ext4_ind_map_blocks() function handles non-extent inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	struct ext4_allocation_request ar;
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		return -EUCLEAN;
	}

	/* Set up for the direct block allocation */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = map->m_lblk;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		ar.flags |= EXT4_MB_USE_RESERVED;

	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks that need to be allocated for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
				       map->m_len, blocks_to_boundary);

	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, &ar, indirect_blks,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned. Can we handle this somehow? We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
	count = ar.len;
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
	return err;
}
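
/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *	int ret = ext4_ind_map_blocks(handle, inode, &map,
 *				      EXT4_GET_BLOCKS_CREATE);
 *
 * On success ret is the number of blocks mapped, map.m_pblk holds the
 * first physical block, and EXT4_MAP_NEW is set in map.m_flags if an
 * allocation took place.
 */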
/*
 * O_DIRECT for ext3 (or indirect map) based files
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list. So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file. But current
 * VFS code falls back into buffered path in that case so we are safe.
 */
ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			   loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_iter_count(iter);
	int retries = 0;

	if (iov_iter_rw(iter) == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}

retry:
	if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) {
		/*
		 * Nolock dioread optimization may be dynamically disabled
		 * via ext4_inode_block_unlocked_dio(). Check inode's state
		 * while holding extra i_dio_count ref.
		 */
		inode_dio_begin(inode);
		smp_mb();
		if (unlikely(ext4_test_inode_state(inode,
						   EXT4_STATE_DIOREAD_LOCK))) {
			inode_dio_end(inode);
			goto locked;
		}
		if (IS_DAX(inode))
			ret = dax_do_io(iocb, inode, iter, offset,
					ext4_get_block, NULL, 0);
		else
			ret = __blockdev_direct_IO(iocb, inode,
						   inode->i_sb->s_bdev, iter,
						   offset, ext4_get_block, NULL,
						   NULL, 0);
		inode_dio_end(inode);
	} else {
locked:
		if (IS_DAX(inode))
			ret = dax_do_io(iocb, inode, iter, offset,
					ext4_get_block, NULL, DIO_LOCKING);
		else
			ret = blockdev_direct_IO(iocb, inode, iter, offset,
						 ext4_get_block);

		if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + count;

			if (end > isize)
				ext4_truncate_failed_write(inode);
		}
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;

			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace. So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a new block at @lblock for a non-extent file.
 */
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
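
/*
 * Worked example (illustrative, not part of the original source): with 4KB
 * blocks (EXT4_ADDR_PER_BLOCK_BITS == 10), a delalloc write at logical
 * block 5000 gives lblock = 5000 - 12 = 4988 and blk_bits =
 * order_base_2(4988) = 13, so 13/10 + 1 = 2 metadata blocks are reserved:
 * an indirect and a double-indirect block may have to be allocated.
 * Subsequent writes that map through the same indirect block (same
 * lblock & dind_mask) only bump i_da_metadata_calc_len and reserve
 * nothing extra.
 */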
/*
 * Calculate the number of indirect blocks touched by mapping @nrblocks
 * logically contiguous blocks.
 */
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
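
/*
 * Illustrative example (not part of the original source): with 4KB blocks,
 * mapping nrblocks == 100 contiguous blocks yields
 * DIV_ROUND_UP(100, 1024) + 4 == 5 potentially modified index blocks.
 */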
/*
 * Truncate transactions can be complex and absolutely huge. So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation. If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 *
 * Returns 0 if we managed to create more room. If we can't create more
 * room, the transaction must be restarted and we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
		return 0;
	return 1;
}
/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to store the (detached) top of the branch
 *
 * This is a helper function used by ext4_truncate().
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive. A block is
 * partially truncated if some data below the new i_size is referred
 * from it (and it is on the path to the first completely truncated
 * data block, indeed). We have to free the top of that path along
 * with everything to the right of the path. Since no allocation
 * past the truncation point is possible until ext4_truncate()
 * finishes, we may safely do the latter, but top of branch may
 * require special attention - pageout below the truncation point
 * might try to populate it.
 *
 * We atomically detach the top of branch from the tree, store the
 * block number of its root in *@top, pointers to buffer_heads of
 * partially truncated blocks - in @chain[].bh and pointers to
 * their last elements that should not be removed - in
 * @chain[].p. Return value is the pointer to last filled element
 * of @chain.
 *
 * Work left for the caller: the actual freeing of the subtrees.
 *	a) free the subtree starting from *@top
 *	b) free the subtrees whose roots are stored in
 *	   (@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *	c) free the subtrees growing from the inode past the @chain[0].
 *	   (no partially truncated stuff there).
 */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4. Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
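
/*
 * Illustrative example (not part of the original source): if the first
 * block past the new i_size has path {EXT4_DIND_BLOCK, 3, 5}, the
 * double-indirect block and indirect block number 3 below it are
 * partially truncated and must survive. ext4_find_shared() returns the
 * chain leading to them; the caller then frees everything to the right
 * of chain[i].p in each surviving block, plus the subtree rooted at
 * *@top if one was detached.
 */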
/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int	flags = EXT4_FREE_BLOCKS_VALIDATED;
	int	err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}
/**
 * ext4_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free. Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
	ext4_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
							block_to_free, count,
							block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}
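
/*
 * Illustrative example (not part of the original source): if the pointer
 * array holds blocks { 100, 101, 102, 0, 200 }, the loop accumulates the
 * run 100..102 (count == 3), skips the hole, and flushes the run with one
 * ext4_clear_blocks() call when it sees 200; the final call after the
 * loop then frees the single-block run starting at 200.
 */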
/**
 * ext4_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block. Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released. Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it. So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block. This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop. No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
}
/**
 * ext4_ind_remove_space - remove space from the range
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @start: First block to remove
 * @end: One block after the last block to remove (exclusive)
 *
 * Free the blocks in the defined range (end is exclusive endpoint of
 * range). This is used by ext4_punch_hole().
 */
int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4], offsets2[4];
	Indirect chain[4], chain2[4];
	Indirect *partial, *partial2;
	ext4_lblk_t max_block;
	__le32 nr = 0, nr2 = 0;
	int n = 0, n2 = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;

	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	if (end >= max_block)
		end = max_block;
	if ((start >= end) || (start > max_block))
		return 0;

	n = ext4_block_to_path(inode, start, offsets, NULL);
	n2 = ext4_block_to_path(inode, end, offsets2, NULL);

	BUG_ON(n > n2);

	if ((n == 1) && (n == n2)) {
		/* We're punching only within direct block range */
		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
			       i_data + offsets2[0]);
		return 0;
	} else if (n2 > n) {
		/*
		 * Start and end are at different levels, so we're going to
		 * free the partial block at start, and the partial block at
		 * the end of the range. If there are some levels in between
		 * then the do_indirects label will take care of that.
		 */
		if (n == 1) {
			/*
			 * Start is at the direct block level, free
			 * everything to the end of the level.
			 */
			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
				       i_data + EXT4_NDIR_BLOCKS);
			goto end_range;
		}

		partial = ext4_find_shared(inode, n, offsets, chain, &nr);
		if (nr) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
			}
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the start of the range
		 */
		while (partial > chain) {
			ext4_free_branches(handle, inode, partial->bh,
				partial->p + 1,
				(__le32 *)partial->bh->b_data+addr_per_block,
				(chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}

end_range:
		partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
		if (nr2) {
			if (partial2 == chain2) {
				/*
				 * Remember, end is exclusive so here we're at
				 * the start of the next level we're not going
				 * to free. Everything was covered by the start
				 * of the range.
				 */
				goto do_indirects;
			}
		} else {
			/*
			 * ext4_find_shared returns Indirect structure which
			 * points to the last element which should not be
			 * removed by truncate. But this is end of the range
			 * in punch_hole so we need to point to the next element
			 */
			partial2->p++;
		}

		/*
		 * Clear the ends of indirect blocks on the shared branch
		 * at the end of the range
		 */
		while (partial2 > chain2) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			partial2--;
		}
		goto do_indirects;
	}

	/* Punch happened within the same level (n == n2) */
	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);

	/* Free top, but only if partial2 isn't its subtree. */
	if (nr) {
		int level = min(partial - chain, partial2 - chain2);
		int i;
		int subtree = 1;

		for (i = 0; i <= level; i++) {
			if (offsets[i] != offsets2[i]) {
				subtree = 0;
				break;
			}
		}

		if (!subtree) {
			if (partial == chain) {
				/* Shared branch grows from the inode */
				ext4_free_branches(handle, inode, NULL,
						   &nr, &nr+1,
						   (chain+n-1) - partial);
				*partial->p = 0;
			} else {
				/* Shared branch grows from an indirect block */
				BUFFER_TRACE(partial->bh, "get_write_access");
				ext4_free_branches(handle, inode, partial->bh,
						   partial->p,
						   partial->p+1,
						   (chain+n-1) - partial);
			}
		}
	}

	if (!nr2) {
		/*
		 * ext4_find_shared returns Indirect structure which
		 * points to the last element which should not be
		 * removed by truncate. But this is end of the range
		 * in punch_hole so we need to point to the next element
		 */
		partial2->p++;
	}

	while (partial > chain || partial2 > chain2) {
		int depth = (chain+n-1) - partial;
		int depth2 = (chain2+n2-1) - partial2;

		if (partial > chain && partial2 > chain2 &&
		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
			/*
			 * We've converged on the same block. Clear the range,
			 * then we're done.
			 */
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   partial2->p,
					   (chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			return 0;
		}

		/*
		 * The start and end partial branches may not be at the same
		 * level even though the punch happened within one level. So,
		 * we give them a chance to arrive at the same level, then
		 * walk them in step with each other until we converge on the
		 * same block.
		 */
		if (partial > chain && depth <= depth2) {
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p + 1,
					   (__le32 *)partial->bh->b_data+addr_per_block,
					   (chain+n-1) - partial);
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}
		if (partial2 > chain2 && depth2 <= depth) {
			ext4_free_branches(handle, inode, partial2->bh,
					   (__le32 *)partial2->bh->b_data,
					   partial2->p,
					   (chain2+n2-1) - partial2);
			BUFFER_TRACE(partial2->bh, "call brelse");
			brelse(partial2->bh);
			partial2--;
		}
	}
	return 0;

do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		if (++n >= n2)
			return 0;
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}
	return 0;
}