/*
 * fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/rbtree.h>
#include <linux/list_sort.h>
#include "ext4.h"
#include "extents_status.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion in the Ext4 Developer Workshop, we
 * will introduce a new structure called the io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warnings), and to provide extent-level locking.
 * The delay extent tree is the first step towards this goal.  It was
 * originally built by Yongqiang Yang.  At that time its only goal was
 * to track delayed extents in memory in order to simplify the
 * implementation of fiemap and bigalloc, and to introduce lseek
 * SEEK_DATA/SEEK_HOLE support.  That is why it was still called the
 * delay extent tree at the first commit.  But to better describe what
 * it does, it has been renamed to the extent status tree.
 *
 * Step1:
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  The tree maintains a delayed extent from the
 * moment a delayed allocation is issued until the delayed extent is
 * written out or invalidated.  Therefore the implementations of fiemap
 * and bigalloc are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents
 * in the status tree are loaded on-demand.  Therefore, the extent
 * status tree may not contain all of the extents in a file.  Meanwhile
 * we define a shrinker to reclaim memory from the extent status tree
 * because a fragmented extent tree will make the status tree cost too
 * much memory.  Written/unwritten/hole extents in the tree will be
 * reclaimed by this shrinker when we are under high memory pressure.
 * Delayed extents will not be reclaimed because fiemap, bigalloc, and
 * seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement the extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies: the code
 * is complicated, buggy, and inefficient.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they do this without the extent status tree.
 *
 *   -- FIEMAP
 *	FIEMAP looks up the page cache to distinguish delayed
 *	allocations from holes.
 *
 *   -- SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 *   -- bigalloc
 *	bigalloc looks up the page cache to figure out if a block is
 *	already under delayed allocation or not in order to determine
 *	whether quota reservation is needed for the cluster.
 *
 *   -- writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, this is
 *	time-consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out if a block or a range of blocks
 * is under delayed allocation (i.e. belongs to a delayed extent) by
 * searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 *   -- extent
 *	An extent is a range of blocks which are contiguous both
 *	logically and physically.  Unlike an extent in the extent tree,
 *	this extent is an in-memory struct; there is no corresponding
 *	on-disk data.  There is no limit on the length of an extent, so
 *	an extent can contain as many blocks as are logically and
 *	physically contiguous.
 *
 *   -- extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with different statuses.  The extents in
 *	the tree are ordered by logical block number.
 *
 *   -- operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks),
 *	and removing an extent.
 *
 *   -- races on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 *   -- memory consumption
 *	A fragmented extent tree will make the extent status tree cost
 *	too much memory.  Hence, we reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 *   -- overhead
 *	There is a cached extent for write access, so if writes are not
 *	very random, adding-space operations run in O(1) time.
 *
 *   -- gain
 *	The code is much simpler, more readable, more maintainable and
 *	more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */

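/*
 * Illustrative sketch (not compiled in): how the per-inode status tree
 * described above can be walked in logical-block order.  This helper is
 * hypothetical; it only assumes the rbtree API plus the extent_status
 * fields used throughout this file, and takes i_es_lock for reading the
 * way every lookup below does.
 */
#if 0
static void es_sketch_walk(struct inode *inode)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;

	read_lock(&EXT4_I(inode)->i_es_lock);
	for (node = rb_first(&tree->root); node; node = rb_next(node)) {
		struct extent_status *es;

		/* extents come out ordered by es_lblk and never overlap */
		es = rb_entry(node, struct extent_status, rb_node);
		es_debug("[%u/%u)\n", es->es_lblk, es->es_len);
	}
	read_unlock(&EXT4_I(inode)->i_es_lock);
}
#endif
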
static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end);
static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
				       int nr_to_scan);
static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
			    struct ext4_inode_info *locked_ei);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	if (ext4_es_cachep)
		kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}

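/*
 * Example (illustrative values): if the tree holds [0/5) and [10/5),
 * searching for lblk 2 returns [0/5); searching for lblk 7 falls into
 * the gap and returns the next extent, [10/5); and searching for
 * lblk 20 returns NULL.
 */
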
/*
 * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
 * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @end: the offset where we stop to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent_range(struct inode *inode,
				       ext4_lblk_t lblk, ext4_lblk_t end,
				       struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	BUG_ON(es == NULL);
	BUG_ON(end < lblk);
	trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;

	/* find extent in cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %x\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !ext4_es_is_delayed(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (ext4_es_is_delayed(es1))
				break;
		}
	}

	if (es1 && ext4_es_is_delayed(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;

	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim
	 * them.
	 */
	if (!ext4_es_is_delayed(es)) {
		EXT4_I(inode)->i_es_lru_nr++;
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
	}

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	/* Decrease the lru counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
		EXT4_I(inode)->i_es_lru_nr--;
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged.
 * Conditions:
 *  - logical block numbers are contiguous
 *  - physical block numbers are contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_status(es1) != ext4_es_status(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/*
	 * We need to make sure that a delayed extent doesn't also carry
	 * the unwritten status.
	 */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}

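/*
 * Example (illustrative values): a written extent [0/8) at pblk 1000
 * and a written extent [8/4) at pblk 1008 satisfy all of the conditions
 * above and merge into [0/12) at pblk 1000.  Two adjacent delayed
 * extents merge on logical contiguity alone, since they carry no
 * physical block.
 */
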
static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_ext_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {
		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be part of a whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we are not trying to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c.  So we
	 * cannot access the direct/indirect tree from outside it, and it
	 * would be too ugly to define this function in indirect.c.
	 */
	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG_ON(1);
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about a race condition because the
	 * caller holds i_data_sem.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because the extents don't overlap.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG_ON(1);
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
					       EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}

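/*
 * Illustrative sketch (not compiled in): a hypothetical caller recording
 * a freshly mapped range after block allocation.  The status flag name
 * is the one used elsewhere in this file; the helper itself and its
 * error handling are assumptions, not a real caller.
 */
#if 0
static void es_sketch_record_written(struct inode *inode,
				     struct ext4_map_blocks *map)
{
	int ret;

	/* callers hold i_data_sem; the tree takes i_es_lock internally */
	ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				    map->m_pblk, EXTENT_STATUS_WRITTEN);
	WARN_ON(ret < 0);
}
#endif
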
/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find extent in cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}

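/*
 * Illustrative sketch (not compiled in): how a hypothetical caller could
 * consume a cache hit.  The offset arithmetic mirrors the way
 * es_lblk/es_pblk are used throughout this file; the helper itself is
 * an assumption, not the real ext4_map_blocks fast path.
 */
#if 0
static int es_sketch_map_from_cache(struct inode *inode, ext4_lblk_t lblk,
				    ext4_fsblk_t *pblk)
{
	struct extent_status es;

	if (!ext4_es_lookup_extent(inode, lblk, &es))
		return 0;	/* miss: fall back to the extent tree */
	if (!ext4_es_is_written(&es) && !ext4_es_is_unwritten(&es))
		return 0;	/* hole or delayed: no physical block yet */
	*pblk = ext4_es_pblock(&es) + (lblk - es.es_lblk);
	return 1;
}
#endif
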
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err;

retry:
	err = 0;
	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
						     EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		goto out;
	}

	if (len1 > 0) {
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out:
	return err;
}

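/*
 * Example (illustrative values): removing blocks 10..19 from a single
 * extent [0/30) exercises the len1 > 0 && len2 > 0 case above: the
 * existing extent is trimmed to [0/10) and a new extent [20/10) is
 * inserted for the surviving tail.
 */
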
/*
 * ext4_es_remove_extent() removes a range of blocks from an extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	return err;
}

static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
				     struct list_head *b)
{
	struct ext4_inode_info *eia, *eib;

	eia = list_entry(a, struct ext4_inode_info, i_es_lru);
	eib = list_entry(b, struct ext4_inode_info, i_es_lru);

	if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
	    !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
		return 1;
	if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
	    ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
		return -1;
	if (eia->i_touch_when == eib->i_touch_when)
		return 0;
	if (time_after(eia->i_touch_when, eib->i_touch_when))
		return 1;
	else
		return -1;
}

static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
			    struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct list_head *cur, *tmp;
	LIST_HEAD(skipped);
	int nr_shrunk = 0;
	int retried = 0, skip_precached = 1, nr_skipped = 0;

	spin_lock(&sbi->s_es_lru_lock);

retry:
	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
		int shrunk;

		/*
		 * If we have already reclaimed all extents from the extent
		 * status tree, just stop the loop immediately.
		 */
		if (percpu_counter_read_positive(&sbi->s_extent_cache_cnt) == 0)
			break;

		ei = list_entry(cur, struct ext4_inode_info, i_es_lru);

		/*
		 * Skip inodes that were touched after the last sort.
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will shrink them as a last resort.
		 */
		if ((sbi->s_es_last_sorted < ei->i_touch_when) ||
		    (skip_precached && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED))) {
			nr_skipped++;
			list_move_tail(cur, &skipped);
			continue;
		}

		if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
		    !write_trylock(&ei->i_es_lock))
			continue;

		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
		if (ei->i_es_lru_nr == 0)
			list_del_init(&ei->i_es_lru);
		write_unlock(&ei->i_es_lock);

		nr_shrunk += shrunk;
		nr_to_scan -= shrunk;
		if (nr_to_scan == 0)
			break;
	}

	/* Move the newer inodes into the tail of the LRU list. */
	list_splice_tail(&skipped, &sbi->s_es_lru);
	INIT_LIST_HEAD(&skipped);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, sort the list and try again.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
		sbi->s_es_last_sorted = jiffies;
		ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info,
				      i_es_lru);
		/*
		 * If there are no non-precached inodes left on the
		 * list, start releasing precached extents.
		 */
		if (ext4_test_inode_state(&ei->vfs_inode,
					  EXT4_STATE_EXT_PRECACHED))
			skip_precached = 0;
		goto retry;
	}

	spin_unlock(&sbi->s_es_lru_lock);

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan);

	return nr_shrunk;
}

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
	trace_ext4_es_shrink_enter(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
	trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);

	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}

void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	INIT_LIST_HEAD(&sbi->s_es_lru);
	spin_lock_init(&sbi->s_es_lru_lock);
	sbi->s_es_last_sorted = 0;
	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&sbi->s_es_shrinker);
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	unregister_shrinker(&sbi->s_es_shrinker);
}

void ext4_es_lru_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	ei->i_touch_when = jiffies;

	if (!list_empty(&ei->i_es_lru))
		return;

	spin_lock(&sbi->s_es_lru_lock);
	if (list_empty(&ei->i_es_lru))
		list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);
}

void ext4_es_lru_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lru_lock);
	if (!list_empty(&ei->i_es_lru))
		list_del_init(&ei->i_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);
}

static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
				       int nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	unsigned long nr_shrunk = 0;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_lru_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	node = rb_first(&tree->root);
	while (node != NULL) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to
		 * use them.
		 */
		if (!ext4_es_is_delayed(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
			nr_shrunk++;
			if (--nr_to_scan == 0)
				break;
		}
	}
	tree->cache_es = NULL;
	return nr_shrunk;
}