/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/rbtree.h>
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"
#include "extents_status.h"

#include <trace/events/ext4.h>
/*
 * According to previous discussion at the Ext4 Developer Workshop, we
 * will introduce a new structure called the io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warning), and to provide extent-level locking.
 * The delay extent tree is the first step towards this goal.  It was
 * originally built by Yongqiang Yang.  At that time it was called the
 * delay extent tree, and its only goal was to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support.  That is why it was still
 * called the delay extent tree at the first commit.  But to better convey
 * what it does, it has been renamed to the extent status tree.
 *
 * Step1:
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  The tree maintains a delayed extent from the
 * moment a delayed allocation is issued until the delayed extent is
 * written out or invalidated.  Therefore the implementations of fiemap
 * and bigalloc are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents
 * in the status tree are loaded on demand, so the extent status tree
 * may not contain all of the extents in a file.  Meanwhile we define a
 * shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree will make the status tree cost too much memory.
 * Written/unwritten/hole extents in the tree will be reclaimed by this
 * shrinker when we are under high memory pressure.  Delayed extents will
 * not be reclaimed because fiemap, bigalloc, and seek_data/hole need them.
 */
/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache; this has several deficiencies - complicated,
 * buggy, and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know whether
 * a block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 *
 *   -- FIEMAP
 *      FIEMAP looks up the page cache to distinguish delayed allocations
 *      from holes.
 *
 *   -- SEEK_HOLE/DATA
 *      SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 *   -- bigalloc
 *      bigalloc looks up the page cache to figure out whether a block is
 *      already under delayed allocation, in order to determine whether
 *      quota reservation is needed for the cluster.
 *
 *   -- writeout
 *      Writeout looks up the whole page cache to see if a buffer is
 *      mapped.  If there are not very many delayed buffers, this is
 *      time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (belongs to a delayed extent) or
 * not by searching this tree; a usage sketch follows this comment.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 *   -- extent
 *      An extent is a range of blocks which are contiguous both logically
 *      and physically.  Unlike an extent in the extent tree, this extent
 *      is an in-memory struct; there is no corresponding on-disk data.
 *      There is no limit on the length of an extent, so an extent can
 *      contain as many blocks as are contiguous logically and physically.
 *
 *   -- extent status tree
 *      Every inode has an extent status tree and all allocated blocks
 *      are added to the tree with their status.  The extents in the
 *      tree are ordered by logical block number.
 *
 *   -- operations on an extent status tree
 *      There are three important operations on an extent status tree:
 *      finding the next extent, adding an extent (a range of blocks),
 *      and removing an extent.
 *
 *   -- race on an extent status tree
 *      The extent status tree is protected by inode->i_es_lock.
 *
 *   -- memory consumption
 *      A fragmented extent tree will make the extent status tree cost
 *      too much memory.  Hence, we will reclaim written/unwritten/hole
 *      extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 *   -- overhead
 *      1. There is a cached extent for write access, so if writes are
 *      not very random, adding space operations are in O(1) time.
 *
 *   -- gain
 *      2. Code is much simpler, more readable, more maintainable and
 *      more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */
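
/*
 * A minimal usage sketch, not part of this file's logic: how a caller
 * might drive the three core operations (lookup, insert, remove) through
 * the public API below.  The helper name and the control flow here are
 * hypothetical illustrations only; the real callers are ext4_map_blocks()
 * and friends.  Guarded by "#if 0" so it is never compiled.
 */
#if 0
static void es_usage_sketch(struct inode *inode, ext4_lblk_t lblk,
                            ext4_lblk_t len, ext4_fsblk_t pblk)
{
        struct extent_status es;

        /* 1) Try the in-memory tree before touching on-disk structures. */
        if (ext4_es_lookup_extent(inode, lblk, &es) &&
            ext4_es_is_written(&es))
                return;         /* mapping already cached */

        /* 2) Record a freshly allocated, written mapping. */
        ext4_es_insert_extent(inode, lblk, len, pblk, EXTENT_STATUS_WRITTEN);

        /* 3) Drop the range again, e.g. on truncate or invalidation. */
        ext4_es_remove_extent(inode, lblk, len);
}
#endif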
static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                              ext4_lblk_t end);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
                       struct ext4_inode_info *locked_ei);

int __init ext4_init_es(void)
{
        ext4_es_cachep = kmem_cache_create("ext4_extent_status",
                                           sizeof(struct extent_status),
                                           0, (SLAB_RECLAIM_ACCOUNT), NULL);
        if (ext4_es_cachep == NULL)
                return -ENOMEM;
        return 0;
}

void ext4_exit_es(void)
{
        if (ext4_es_cachep)
                kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
        tree->root = RB_ROOT;
        tree->cache_es = NULL;
}
#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
        struct ext4_es_tree *tree;
        struct rb_node *node;

        printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
        tree = &EXT4_I(inode)->i_es_tree;
        node = rb_first(&tree->root);
        while (node) {
                struct extent_status *es;

                es = rb_entry(node, struct extent_status, rb_node);
                printk(KERN_DEBUG " [%u/%u) %llu %x",
                       es->es_lblk, es->es_len,
                       ext4_es_pblock(es), ext4_es_status(es));
                node = rb_next(node);
        }
        printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
        BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
        return es->es_lblk + es->es_len - 1;
}
/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
                                              ext4_lblk_t lblk)
{
        struct rb_node *node = root->rb_node;
        struct extent_status *es = NULL;

        while (node) {
                es = rb_entry(node, struct extent_status, rb_node);
                if (lblk < es->es_lblk)
                        node = node->rb_left;
                else if (lblk > ext4_es_end(es))
                        node = node->rb_right;
                else
                        return es;
        }

        if (es && lblk < es->es_lblk)
                return es;

        if (es && lblk > ext4_es_end(es)) {
                node = rb_next(&es->rb_node);
                return node ? rb_entry(node, struct extent_status, rb_node) :
                              NULL;
        }

        return NULL;
}
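
/*
 * A worked example of the fall-through cases above, assuming a tree that
 * holds the two extents [10/5) (blocks 10..14) and [30/10) (blocks 30..39):
 *
 *   __es_tree_search(root, 12)   returns [10/5)    (covers lblk)
 *   __es_tree_search(root, 20)   returns [30/10)   (next extent after lblk)
 *   __es_tree_search(root, 45)   returns NULL      (nothing at or after lblk)
 */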
/*
 * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
 * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @end: the offset where we stop to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent_range(struct inode *inode,
                                       ext4_lblk_t lblk, ext4_lblk_t end,
                                       struct extent_status *es)
{
        struct ext4_es_tree *tree = NULL;
        struct extent_status *es1 = NULL;
        struct rb_node *node;

        BUG_ON(es == NULL);
        BUG_ON(end < lblk);
        trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);

        read_lock(&EXT4_I(inode)->i_es_lock);
        tree = &EXT4_I(inode)->i_es_tree;

        /* find the extent in the cache first */
        es->es_lblk = es->es_len = es->es_pblk = 0;
        if (tree->cache_es) {
                es1 = tree->cache_es;
                if (in_range(lblk, es1->es_lblk, es1->es_len)) {
                        es_debug("%u cached by [%u/%u) %llu %x\n",
                                 lblk, es1->es_lblk, es1->es_len,
                                 ext4_es_pblock(es1), ext4_es_status(es1));
                        goto out;
                }
        }

        es1 = __es_tree_search(&tree->root, lblk);

out:
        if (es1 && !ext4_es_is_delayed(es1)) {
                while ((node = rb_next(&es1->rb_node)) != NULL) {
                        es1 = rb_entry(node, struct extent_status, rb_node);
                        if (es1->es_lblk > end) {
                                es1 = NULL;
                                break;
                        }
                        if (ext4_es_is_delayed(es1))
                                break;
                }
        }

        if (es1 && ext4_es_is_delayed(es1)) {
                tree->cache_es = es1;
                es->es_lblk = es1->es_lblk;
                es->es_len = es1->es_len;
                es->es_pblk = es1->es_pblk;
        }

        read_unlock(&EXT4_I(inode)->i_es_lock);

        trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}
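
/*
 * A hedged sketch of how a SEEK_DATA-style caller could consume the
 * function above: report the first delayed block at or after @lblk, or
 * EXT_MAX_BLOCKS when the range holds none (the function zeroes @es when
 * nothing is found).  The helper name and loop shape are illustrations
 * only; the real consumer lives in fs/ext4/file.c.  Guarded by "#if 0"
 * so it is never compiled.
 */
#if 0
static ext4_lblk_t es_seek_delayed_sketch(struct inode *inode,
                                          ext4_lblk_t lblk, ext4_lblk_t end)
{
        struct extent_status es;

        ext4_es_find_delayed_extent_range(inode, lblk, end, &es);
        if (es.es_len == 0)
                return EXT_MAX_BLOCKS;  /* no delayed extent in range */
        return es.es_lblk;              /* first delayed block >= lblk */
}
#endif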
static void ext4_es_list_add(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

        if (!list_empty(&ei->i_es_list))
                return;

        spin_lock(&sbi->s_es_lock);
        if (list_empty(&ei->i_es_list)) {
                list_add_tail(&ei->i_es_list, &sbi->s_es_list);
                sbi->s_es_nr_inode++;
        }
        spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

        spin_lock(&sbi->s_es_lock);
        if (!list_empty(&ei->i_es_list)) {
                list_del_init(&ei->i_es_list);
                sbi->s_es_nr_inode--;
                WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
        }
        spin_unlock(&sbi->s_es_lock);
}
static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
                     ext4_fsblk_t pblk)
{
        struct extent_status *es;

        es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
        if (es == NULL)
                return NULL;
        es->es_lblk = lblk;
        es->es_len = len;
        es->es_pblk = pblk;

        /*
         * We don't count delayed extents because we never try to reclaim
         * them.
         */
        if (!ext4_es_is_delayed(es)) {
                if (!EXT4_I(inode)->i_es_shk_nr++)
                        ext4_es_list_add(inode);
                percpu_counter_inc(&EXT4_SB(inode->i_sb)->
                                        s_es_stats.es_stats_shk_cnt);
        }

        EXT4_I(inode)->i_es_all_nr++;
        percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

        return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
        EXT4_I(inode)->i_es_all_nr--;
        percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

        /* Decrease the shrink counter when this es is not delayed */
        if (!ext4_es_is_delayed(es)) {
                BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
                if (!--EXT4_I(inode)->i_es_shk_nr)
                        ext4_es_list_del(inode);
                percpu_counter_dec(&EXT4_SB(inode->i_sb)->
                                        s_es_stats.es_stats_shk_cnt);
        }

        kmem_cache_free(ext4_es_cachep, es);
}
/*
 * Check whether or not two extents can be merged.
 * Conditions:
 *  - logical block numbers are contiguous
 *  - physical block numbers are contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
                                 struct extent_status *es2)
{
        if (ext4_es_type(es1) != ext4_es_type(es2))
                return 0;

        if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
                pr_warn("ES assertion failed when merging extents. "
                        "The sum of lengths of es1 (%d) and es2 (%d) "
                        "is bigger than allowed file size (%d)\n",
                        es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
                WARN_ON(1);
                return 0;
        }

        if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
                return 0;

        if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
            (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
                return 1;

        if (ext4_es_is_hole(es1))
                return 1;

        /* we need to check that a delayed extent is without unwritten status */
        if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
                return 1;

        return 0;
}
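
/*
 * Merge examples derived from the checks above (w=written, h=hole):
 *
 *   [10/5) pblk 100 w  +  [15/3) pblk 105 w   -> mergeable
 *   [10/5) pblk 100 w  +  [15/3) pblk 200 w   -> not mergeable (pblk gap)
 *   [10/5) h           +  [15/3) h            -> mergeable
 *   [10/5) pblk 100 w  +  [16/3) pblk 105 w   -> not mergeable (lblk gap)
 */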
static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
        struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct extent_status *es1;
        struct rb_node *node;

        node = rb_prev(&es->rb_node);
        if (!node)
                return es;

        es1 = rb_entry(node, struct extent_status, rb_node);
        if (ext4_es_can_be_merged(es1, es)) {
                es1->es_len += es->es_len;
                if (ext4_es_is_referenced(es))
                        ext4_es_set_referenced(es1);
                rb_erase(&es->rb_node, &tree->root);
                ext4_es_free_extent(inode, es);
                es = es1;
        }

        return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
        struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct extent_status *es1;
        struct rb_node *node;

        node = rb_next(&es->rb_node);
        if (!node)
                return es;

        es1 = rb_entry(node, struct extent_status, rb_node);
        if (ext4_es_can_be_merged(es, es1)) {
                es->es_len += es1->es_len;
                if (ext4_es_is_referenced(es1))
                        ext4_es_set_referenced(es);
                rb_erase(node, &tree->root);
                ext4_es_free_extent(inode, es1);
        }

        return es;
}
#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
                                            struct extent_status *es)
{
        struct ext4_ext_path *path = NULL;
        struct ext4_extent *ex;
        ext4_lblk_t ee_block;
        ext4_fsblk_t ee_start;
        unsigned short ee_len;
        int depth, ee_status, es_status;

        path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
        if (IS_ERR(path))
                return;

        depth = ext_depth(inode);
        ex = path[depth].p_ext;

        if (ex) {
                ee_block = le32_to_cpu(ex->ee_block);
                ee_start = ext4_ext_pblock(ex);
                ee_len = ext4_ext_get_actual_len(ex);

                ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
                es_status = ext4_es_is_unwritten(es) ? 1 : 0;

                /*
                 * Make sure ex and es do not overlap when we try to
                 * insert a delayed/hole extent.
                 */
                if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
                        if (in_range(es->es_lblk, ee_block, ee_len)) {
                                pr_warn("ES insert assertion failed for "
                                        "inode: %lu we can find an extent "
                                        "at block [%d/%d/%llu/%c], but we "
                                        "want to add a delayed/hole extent "
                                        "[%d/%d/%llu/%x]\n",
                                        inode->i_ino, ee_block, ee_len,
                                        ee_start, ee_status ? 'u' : 'w',
                                        es->es_lblk, es->es_len,
                                        ext4_es_pblock(es), ext4_es_status(es));
                        }
                        goto out;
                }

                /*
                 * We don't check ee_block == es->es_lblk, etc. because es
                 * might be a part of a whole extent, and vice versa.
                 */
                if (es->es_lblk < ee_block ||
                    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
                        pr_warn("ES insert assertion failed for inode: %lu "
                                "ex_status [%d/%d/%llu/%c] != "
                                "es_status [%d/%d/%llu/%c]\n", inode->i_ino,
                                ee_block, ee_len, ee_start,
                                ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
                                ext4_es_pblock(es), es_status ? 'u' : 'w');
                        goto out;
                }

                if (ee_status ^ es_status) {
                        pr_warn("ES insert assertion failed for inode: %lu "
                                "ex_status [%d/%d/%llu/%c] != "
                                "es_status [%d/%d/%llu/%c]\n", inode->i_ino,
                                ee_block, ee_len, ee_start,
                                ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
                                ext4_es_pblock(es), es_status ? 'u' : 'w');
                }
        } else {
                /*
                 * We can't find an extent on disk.  So we need to make
                 * sure that we don't want to add a written/unwritten
                 * extent.
                 */
                if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
                        pr_warn("ES insert assertion failed for inode: %lu "
                                "can't find an extent at block %d but we want "
                                "to add a written/unwritten extent "
                                "[%d/%d/%llu/%x]\n", inode->i_ino,
                                es->es_lblk, es->es_lblk, es->es_len,
                                ext4_es_pblock(es), ext4_es_status(es));
                }
        }
out:
        ext4_ext_drop_refs(path);
        kfree(path);
}
static void ext4_es_insert_extent_ind_check(struct inode *inode,
                                            struct extent_status *es)
{
        struct ext4_map_blocks map;
        int retval;

        /*
         * Here we call ext4_ind_map_blocks to look up a block mapping
         * because the 'Indirect' structure is defined in indirect.c, so
         * we can't access the direct/indirect tree from outside, and it
         * would be too ugly to define this function in indirect.c.
         */
        map.m_lblk = es->es_lblk;
        map.m_len = es->es_len;

        retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
        if (retval > 0) {
                if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
                        /*
                         * We want to add a delayed/hole extent but this
                         * block has been allocated.
                         */
                        pr_warn("ES insert assertion failed for inode: %lu "
                                "We can find blocks but we want to add a "
                                "delayed/hole extent [%d/%d/%llu/%x]\n",
                                inode->i_ino, es->es_lblk, es->es_len,
                                ext4_es_pblock(es), ext4_es_status(es));
                        return;
                } else if (ext4_es_is_written(es)) {
                        if (retval != es->es_len) {
                                pr_warn("ES insert assertion failed for "
                                        "inode: %lu retval %d != es_len %d\n",
                                        inode->i_ino, retval, es->es_len);
                                return;
                        }
                        if (map.m_pblk != ext4_es_pblock(es)) {
                                pr_warn("ES insert assertion failed for "
                                        "inode: %lu m_pblk %llu != "
                                        "es_pblk %llu\n",
                                        inode->i_ino, map.m_pblk,
                                        ext4_es_pblock(es));
                                return;
                        }
                } else {
                        /*
                         * We don't need to check an unwritten extent
                         * because an indirect-based file doesn't have one.
                         */
                        BUG_ON(1);
                }
        } else if (retval == 0) {
                if (ext4_es_is_written(es)) {
                        pr_warn("ES insert assertion failed for inode: %lu "
                                "We can't find the block but we want to add "
                                "a written extent [%d/%d/%llu/%x]\n",
                                inode->i_ino, es->es_lblk, es->es_len,
                                ext4_es_pblock(es), ext4_es_status(es));
                        return;
                }
        }
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
                                               struct extent_status *es)
{
        /*
         * We don't need to worry about the race condition because the
         * caller takes i_data_sem locking.
         */
        BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                ext4_es_insert_extent_ext_check(inode, es);
        else
                ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
                                               struct extent_status *es)
{
}
#endif
static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
        struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct rb_node **p = &tree->root.rb_node;
        struct rb_node *parent = NULL;
        struct extent_status *es;

        while (*p) {
                parent = *p;
                es = rb_entry(parent, struct extent_status, rb_node);

                if (newes->es_lblk < es->es_lblk) {
                        if (ext4_es_can_be_merged(newes, es)) {
                                /*
                                 * Here we can modify es_lblk directly
                                 * because it isn't overlapped.
                                 */
                                es->es_lblk = newes->es_lblk;
                                es->es_len += newes->es_len;
                                if (ext4_es_is_written(es) ||
                                    ext4_es_is_unwritten(es))
                                        ext4_es_store_pblock(es,
                                                             newes->es_pblk);

                                es = ext4_es_try_to_merge_left(inode, es);
                                goto out;
                        }
                        p = &(*p)->rb_left;
                } else if (newes->es_lblk > ext4_es_end(es)) {
                        if (ext4_es_can_be_merged(es, newes)) {
                                es->es_len += newes->es_len;
                                es = ext4_es_try_to_merge_right(inode, es);
                                goto out;
                        }
                        p = &(*p)->rb_right;
                } else {
                        BUG_ON(1);
                        return -EINVAL;
                }
        }

        es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
                                  newes->es_pblk);
        if (!es)
                return -ENOMEM;
        rb_link_node(&es->rb_node, parent, p);
        rb_insert_color(&es->rb_node, &tree->root);

out:
        tree->cache_es = es;
        return 0;
}
/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
                          ext4_lblk_t len, ext4_fsblk_t pblk,
                          unsigned int status)
{
        struct extent_status newes;
        ext4_lblk_t end = lblk + len - 1;
        int err = 0;

        es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
                 lblk, len, pblk, status, inode->i_ino);

        if (!len)
                return 0;

        BUG_ON(end < lblk);

        newes.es_lblk = lblk;
        newes.es_len = len;
        ext4_es_store_pblock_status(&newes, pblk, status);
        trace_ext4_es_insert_extent(inode, &newes);

        ext4_es_insert_extent_check(inode, &newes);

        write_lock(&EXT4_I(inode)->i_es_lock);
        err = __es_remove_extent(inode, lblk, end);
        if (err != 0)
                goto error;
retry:
        err = __es_insert_extent(inode, &newes);
        if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
                                          128, EXT4_I(inode)))
                goto retry;
        if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
                err = 0;

error:
        write_unlock(&EXT4_I(inode)->i_es_lock);

        ext4_es_print_tree(inode);

        return err;
}
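
/*
 * Insertion example, assuming the tree already holds the written extent
 * [10/5) at pblk 100: inserting the written extent [15/3) at pblk 105
 * first removes any stale state in blocks 15..17 via __es_remove_extent(),
 * then __es_insert_extent() merges the new range with its left neighbour,
 * leaving the single extent [10/8) at pblk 100 in the tree.
 */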
/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
                          ext4_lblk_t len, ext4_fsblk_t pblk,
                          unsigned int status)
{
        struct extent_status *es;
        struct extent_status newes;
        ext4_lblk_t end = lblk + len - 1;

        newes.es_lblk = lblk;
        newes.es_len = len;
        ext4_es_store_pblock_status(&newes, pblk, status);
        trace_ext4_es_cache_extent(inode, &newes);

        if (!len)
                return;

        BUG_ON(end < lblk);

        write_lock(&EXT4_I(inode)->i_es_lock);

        es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
        if (!es || es->es_lblk > end)
                __es_insert_extent(inode, &newes);
        write_unlock(&EXT4_I(inode)->i_es_lock);
}
/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
                          struct extent_status *es)
{
        struct ext4_es_tree *tree;
        struct ext4_es_stats *stats;
        struct extent_status *es1 = NULL;
        struct rb_node *node;
        int found = 0;

        trace_ext4_es_lookup_extent_enter(inode, lblk);
        es_debug("lookup extent in block %u\n", lblk);

        tree = &EXT4_I(inode)->i_es_tree;
        read_lock(&EXT4_I(inode)->i_es_lock);

        /* find the extent in the cache first */
        es->es_lblk = es->es_len = es->es_pblk = 0;
        if (tree->cache_es) {
                es1 = tree->cache_es;
                if (in_range(lblk, es1->es_lblk, es1->es_len)) {
                        es_debug("%u cached by [%u/%u)\n",
                                 lblk, es1->es_lblk, es1->es_len);
                        found = 1;
                        goto out;
                }
        }

        node = tree->root.rb_node;
        while (node) {
                es1 = rb_entry(node, struct extent_status, rb_node);
                if (lblk < es1->es_lblk)
                        node = node->rb_left;
                else if (lblk > ext4_es_end(es1))
                        node = node->rb_right;
                else {
                        found = 1;
                        break;
                }
        }

out:
        stats = &EXT4_SB(inode->i_sb)->s_es_stats;
        if (found) {
                BUG_ON(!es1);
                es->es_lblk = es1->es_lblk;
                es->es_len = es1->es_len;
                es->es_pblk = es1->es_pblk;
                /*
                 * Mark the tree node (es1), not the caller's copy, as
                 * referenced; the reclaim code checks the bit on the node.
                 */
                if (!ext4_es_is_referenced(es1))
                        ext4_es_set_referenced(es1);
                stats->es_stats_cache_hits++;
        } else {
                stats->es_stats_cache_misses++;
        }

        read_unlock(&EXT4_I(inode)->i_es_lock);

        trace_ext4_es_lookup_extent_exit(inode, es, found);
        return found;
}
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                              ext4_lblk_t end)
{
        struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct rb_node *node;
        struct extent_status *es;
        struct extent_status orig_es;
        ext4_lblk_t len1, len2;
        ext4_fsblk_t block;
        int err;

retry:
        err = 0;
        es = __es_tree_search(&tree->root, lblk);
        if (!es)
                goto out;
        if (es->es_lblk > end)
                goto out;

        /* Simply invalidate cache_es. */
        tree->cache_es = NULL;

        orig_es.es_lblk = es->es_lblk;
        orig_es.es_len = es->es_len;
        orig_es.es_pblk = es->es_pblk;

        len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
        len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
        if (len1 > 0)
                es->es_len = len1;
        if (len2 > 0) {
                if (len1 > 0) {
                        struct extent_status newes;

                        newes.es_lblk = end + 1;
                        newes.es_len = len2;
                        block = 0x7FDEADBEEFULL;
                        if (ext4_es_is_written(&orig_es) ||
                            ext4_es_is_unwritten(&orig_es))
                                block = ext4_es_pblock(&orig_es) +
                                        orig_es.es_len - len2;
                        ext4_es_store_pblock_status(&newes, block,
                                                    ext4_es_status(&orig_es));
                        err = __es_insert_extent(inode, &newes);
                        if (err) {
                                es->es_lblk = orig_es.es_lblk;
                                es->es_len = orig_es.es_len;
                                if ((err == -ENOMEM) &&
                                    __es_shrink(EXT4_SB(inode->i_sb),
                                                128, EXT4_I(inode)))
                                        goto retry;
                                goto out;
                        }
                } else {
                        es->es_lblk = end + 1;
                        es->es_len = len2;
                        if (ext4_es_is_written(es) ||
                            ext4_es_is_unwritten(es)) {
                                block = orig_es.es_pblk + orig_es.es_len - len2;
                                ext4_es_store_pblock(es, block);
                        }
                }
                goto out;
        }

        if (len1 > 0) {
                node = rb_next(&es->rb_node);
                if (node)
                        es = rb_entry(node, struct extent_status, rb_node);
                else
                        es = NULL;
        }

        while (es && ext4_es_end(es) <= end) {
                node = rb_next(&es->rb_node);
                rb_erase(&es->rb_node, &tree->root);
                ext4_es_free_extent(inode, es);
                if (!node) {
                        es = NULL;
                        break;
                }
                es = rb_entry(node, struct extent_status, rb_node);
        }

        if (es && es->es_lblk < end + 1) {
                ext4_lblk_t orig_len = es->es_len;

                len1 = ext4_es_end(es) - end;
                es->es_lblk = end + 1;
                es->es_len = len1;
                if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
                        block = es->es_pblk + orig_len - len1;
                        ext4_es_store_pblock(es, block);
                }
        }

out:
        return err;
}
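
/*
 * The four split cases handled above, for an existing extent E and a
 * removal range R = [lblk, end] (len1 = blocks of E left of R, len2 =
 * blocks of E right of R):
 *
 *   len1 > 0, len2 > 0:    EEEERRRREEEE  -> E is split into two extents
 *   len1 > 0, len2 == 0:   EEEERRRR      -> E is truncated on the right
 *   len1 == 0, len2 > 0:   RRRREEEE      -> E is truncated on the left
 *   len1 == 0, len2 == 0:  RRRRRRRR      -> E is erased entirely
 */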
/*
 * ext4_es_remove_extent() removes a block range from an extent status
 * tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                          ext4_lblk_t len)
{
        ext4_lblk_t end;
        int err = 0;

        trace_ext4_es_remove_extent(inode, lblk, len);
        es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
                 lblk, len, inode->i_ino);

        if (!len)
                return err;

        end = lblk + len - 1;
        BUG_ON(end < lblk);

        /*
         * ext4_clear_inode() depends on us taking i_es_lock unconditionally
         * so that we are sure __es_shrink() is done with the inode before it
         * is reclaimed.
         */
        write_lock(&EXT4_I(inode)->i_es_lock);
        err = __es_remove_extent(inode, lblk, end);
        write_unlock(&EXT4_I(inode)->i_es_lock);
        ext4_es_print_tree(inode);
        return err;
}
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
                       struct ext4_inode_info *locked_ei)
{
        struct ext4_inode_info *ei;
        struct ext4_es_stats *es_stats;
        ktime_t start_time;
        u64 scan_time;
        int nr_to_walk;
        int nr_shrunk = 0;
        int retried = 0, nr_skipped = 0;

        es_stats = &sbi->s_es_stats;
        start_time = ktime_get();

retry:
        spin_lock(&sbi->s_es_lock);
        nr_to_walk = sbi->s_es_nr_inode;
        while (nr_to_walk-- > 0) {
                if (list_empty(&sbi->s_es_list)) {
                        spin_unlock(&sbi->s_es_lock);
                        goto out;
                }
                ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
                                      i_es_list);
                /* Move the inode to the tail */
                list_move_tail(&ei->i_es_list, &sbi->s_es_list);

                /*
                 * Normally we try hard to avoid shrinking precached inodes,
                 * but we will as a last resort.
                 */
                if (!retried && ext4_test_inode_state(&ei->vfs_inode,
                                                EXT4_STATE_EXT_PRECACHED)) {
                        nr_skipped++;
                        continue;
                }

                if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
                        nr_skipped++;
                        continue;
                }
                /*
                 * Now we hold i_es_lock which protects us from inode
                 * reclaim freeing inode under us
                 */
                spin_unlock(&sbi->s_es_lock);

                nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
                write_unlock(&ei->i_es_lock);

                if (nr_to_scan <= 0)
                        goto out;
                spin_lock(&sbi->s_es_lock);
        }
        spin_unlock(&sbi->s_es_lock);

        /*
         * If we skipped any inodes, and we weren't able to make any
         * forward progress, try again to scan precached inodes.
         */
        if ((nr_shrunk == 0) && nr_skipped && !retried) {
                retried++;
                goto retry;
        }

        if (locked_ei && nr_shrunk == 0)
                nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
        /*
         * Fold this scan into the running statistics as an exponentially
         * weighted moving average: new_avg = (sample + 3 * old_avg) / 4.
         */
        scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
        if (likely(es_stats->es_stats_scan_time))
                es_stats->es_stats_scan_time = (scan_time +
                                es_stats->es_stats_scan_time*3) / 4;
        else
                es_stats->es_stats_scan_time = scan_time;
        if (scan_time > es_stats->es_stats_max_scan_time)
                es_stats->es_stats_max_scan_time = scan_time;
        if (likely(es_stats->es_stats_shrunk))
                es_stats->es_stats_shrunk = (nr_shrunk +
                                es_stats->es_stats_shrunk*3) / 4;
        else
                es_stats->es_stats_shrunk = nr_shrunk;

        trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
                             nr_skipped, retried);
        return nr_shrunk;
}
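
/*
 * Note on the two callbacks below: the kernel shrinker API expects
 * count_objects() to report how many objects could be freed (here, the
 * reclaimable, i.e. non-delayed, extents tracked in es_stats_shk_cnt)
 * and scan_objects() to attempt to free up to sc->nr_to_scan of them,
 * returning the number actually freed.  As a worked instance of the
 * moving average above: a scan that frees 8 extents when the running
 * average is 4 updates it to (8 + 3 * 4) / 4 = 5.
 */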
static unsigned long ext4_es_count(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        unsigned long nr;
        struct ext4_sb_info *sbi;

        sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
        nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
        trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
        return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
                                  struct shrink_control *sc)
{
        struct ext4_sb_info *sbi = container_of(shrink,
                                        struct ext4_sb_info, s_es_shrinker);
        int nr_to_scan = sc->nr_to_scan;
        int ret, nr_shrunk;

        ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
        trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

        if (!nr_to_scan)
                return ret;

        nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

        trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
        return nr_shrunk;
}
static void *ext4_es_seq_shrinker_info_start(struct seq_file *seq, loff_t *pos)
{
        return *pos ? NULL : SEQ_START_TOKEN;
}

static void *
ext4_es_seq_shrinker_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return NULL;
}

static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
{
        struct ext4_sb_info *sbi = seq->private;
        struct ext4_es_stats *es_stats = &sbi->s_es_stats;
        struct ext4_inode_info *ei, *max = NULL;
        unsigned int inode_cnt = 0;

        if (v != SEQ_START_TOKEN)
                return 0;

        /* here we just find an inode that has the max nr. of objects */
        spin_lock(&sbi->s_es_lock);
        list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
                inode_cnt++;
                if (max && max->i_es_all_nr < ei->i_es_all_nr)
                        max = ei;
                else if (!max)
                        max = ei;
        }
        spin_unlock(&sbi->s_es_lock);

        seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
                   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
                   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
        seq_printf(seq, "  %lu/%lu cache hits/misses\n",
                   es_stats->es_stats_cache_hits,
                   es_stats->es_stats_cache_misses);
        if (inode_cnt)
                seq_printf(seq, "  %d inodes on list\n", inode_cnt);

        seq_printf(seq, "average:\n  %llu us scan time\n",
                   div_u64(es_stats->es_stats_scan_time, 1000));
        seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
        if (inode_cnt)
                seq_printf(seq,
                        "maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
                        "  %llu us max scan time\n",
                        max->vfs_inode.i_ino, max->i_es_all_nr,
                        max->i_es_shk_nr,
                        div_u64(es_stats->es_stats_max_scan_time, 1000));

        return 0;
}

static void ext4_es_seq_shrinker_info_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations ext4_es_seq_shrinker_info_ops = {
        .start = ext4_es_seq_shrinker_info_start,
        .next  = ext4_es_seq_shrinker_info_next,
        .stop  = ext4_es_seq_shrinker_info_stop,
        .show  = ext4_es_seq_shrinker_info_show,
};

static int
ext4_es_seq_shrinker_info_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = seq_open(file, &ext4_es_seq_shrinker_info_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = PDE_DATA(inode);
        }

        return ret;
}

static int
ext4_es_seq_shrinker_info_release(struct inode *inode, struct file *file)
{
        return seq_release(inode, file);
}

static const struct file_operations ext4_es_seq_shrinker_info_fops = {
        .owner   = THIS_MODULE,
        .open    = ext4_es_seq_shrinker_info_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = ext4_es_seq_shrinker_info_release,
};
int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
        int err;

        /* Make sure we have enough bits for physical block number */
        BUILD_BUG_ON(ES_SHIFT < 48);
        INIT_LIST_HEAD(&sbi->s_es_list);
        sbi->s_es_nr_inode = 0;
        spin_lock_init(&sbi->s_es_lock);
        sbi->s_es_stats.es_stats_shrunk = 0;
        sbi->s_es_stats.es_stats_cache_hits = 0;
        sbi->s_es_stats.es_stats_cache_misses = 0;
        sbi->s_es_stats.es_stats_scan_time = 0;
        sbi->s_es_stats.es_stats_max_scan_time = 0;
        err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt,
                                  0, GFP_KERNEL);
        if (err)
                return err;

        err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt,
                                  0, GFP_KERNEL);
        if (err)
                goto err1;

        sbi->s_es_shrinker.scan_objects = ext4_es_scan;
        sbi->s_es_shrinker.count_objects = ext4_es_count;
        sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
        err = register_shrinker(&sbi->s_es_shrinker);
        if (err)
                goto err2;

        if (sbi->s_proc)
                proc_create_data("es_shrinker_info", S_IRUGO, sbi->s_proc,
                                 &ext4_es_seq_shrinker_info_fops, sbi);

        return 0;

err2:
        percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err1:
        percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
        return err;
}
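
/*
 * The proc file registered above lives under the per-device ext4 proc
 * directory, so (assuming a filesystem on sda1 - the device name here is
 * illustrative) the statistics emitted by the seq_file handlers can be
 * read with:
 *
 *   cat /proc/fs/ext4/sda1/es_shrinker_info
 */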
void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
        if (sbi->s_proc)
                remove_proc_entry("es_shrinker_info", sbi->s_proc);
        percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
        percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
        unregister_shrinker(&sbi->s_es_shrinker);
}
/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents. Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
                                 int *nr_to_scan, int *nr_shrunk)
{
        struct inode *inode = &ei->vfs_inode;
        struct ext4_es_tree *tree = &ei->i_es_tree;
        struct extent_status *es;
        struct rb_node *node;

        es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
        if (!es)
                goto out_wrap;
        node = &es->rb_node;
        while (*nr_to_scan > 0) {
                if (es->es_lblk > end) {
                        ei->i_es_shrink_lblk = end + 1;
                        return 0;
                }

                (*nr_to_scan)--;
                node = rb_next(&es->rb_node);
                /*
                 * We can't reclaim delayed extents from the status tree
                 * because fiemap, bigalloc, and seek_data/hole need to
                 * use them.
                 */
                if (ext4_es_is_delayed(es))
                        goto next;
                if (ext4_es_is_referenced(es)) {
                        ext4_es_clear_referenced(es);
                        goto next;
                }

                rb_erase(&es->rb_node, &tree->root);
                ext4_es_free_extent(inode, es);
                (*nr_shrunk)++;
next:
                if (!node)
                        goto out_wrap;
                es = rb_entry(node, struct extent_status, rb_node);
        }
        ei->i_es_shrink_lblk = es->es_lblk;
        return 1;
out_wrap:
        ei->i_es_shrink_lblk = 0;
        return 0;
}
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
        struct inode *inode = &ei->vfs_inode;
        int nr_shrunk = 0;
        ext4_lblk_t start = ei->i_es_shrink_lblk;
        static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        if (ei->i_es_shk_nr == 0)
                return 0;

        if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
            __ratelimit(&_rs))
                ext4_warning(inode->i_sb, "forced shrink of precached extents");

        /*
         * Scan from the saved cursor to the end of the tree, then wrap
         * around and scan the remainder so the cursor makes a full pass.
         */
        if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
            start != 0)
                es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

        ei->i_es_tree.cache_es = NULL;
        return nr_shrunk;
}