// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion in the Ext4 Developer Workshop, we
 * will introduce a new structure called io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warning), and to provide extent-level
 * locking.  The delay extent tree is the first step toward this goal.
 * It was originally built by Yongqiang Yang.  At that time it was
 * called a delay extent tree, whose only goal was to track delayed
 * extents in memory to simplify the implementation of fiemap and
 * bigalloc, and to introduce lseek SEEK_DATA/SEEK_HOLE support.  That
 * is why it is still called a delay extent tree in the first commit.
 * But to better describe what it does, it has since been renamed to
 * the extent status tree.
 *
 * Step1:
 *	Currently the first step has been done.  All delayed extents are
 *	tracked in the tree.  The tree records a delayed extent when a
 *	delayed allocation is issued, and removes it when the delayed
 *	extent is written out or invalidated.  Therefore the
 *	implementation of fiemap and bigalloc is simplified, and
 *	SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 *	In this step all extent status is tracked by the extent status
 *	tree.  Thus, we can first try to look up a block mapping in this
 *	tree before searching the extent tree.  Hence, the single extent
 *	cache can be removed because the extent status tree can do a
 *	better job.  Extents in the status tree are loaded on demand, so
 *	the extent status tree may not contain all of the extents in a
 *	file.  Meanwhile we define a shrinker to reclaim memory from the
 *	extent status tree because a fragmented extent tree makes the
 *	status tree cost too much memory.  Written/unwritten/hole
 *	extents in the tree will be reclaimed by this shrinker when we
 *	are under heavy memory pressure.  Delayed extents will not be
 *	reclaimed because fiemap, bigalloc, and seek_data/hole need
 *	them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies - complicated,
 * buggy, and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 *
 * --	FIEMAP
 *	FIEMAP looks up the page cache to identify delayed allocations
 *	from holes.
 *
 * --	SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 * --	bigalloc
 *	bigalloc looks up the page cache to figure out if a block is
 *	already under delayed allocation or not, to determine whether
 *	quota reservation is needed for the cluster.
 *
 * --	writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (belongs to a delayed extent) by
 * searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 * --	extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically.  Unlike the extents in the extent tree, these extents
 *	are in-memory structs; there is no corresponding on-disk data.
 *	There is no limit on the length of an extent, so an extent can
 *	contain as many blocks as are contiguous logically and physically.
 *
 * --	extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with different status.  The extents in the
 *	tree are ordered by logical block number.
 *
 * --	operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks),
 *	and removing an extent.
 *
 * --	race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 * --	memory consumption
 *	A fragmented extent tree will make the extent status tree cost
 *	too much memory.  Hence, we will reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 * --	overhead
 *	1. There is a cached extent for write access, so if writes are
 *	not very random, adding-space operations run in O(1) time.
 *
 * --	gain
 *	2. Code is much simpler, more readable, more maintainable and
 *	more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */
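/*
 * Illustrative sketch (not part of this file's logic): a mapping path
 * such as ext4_map_blocks() typically drives the tree with the public
 * helpers below.  The surrounding control flow here is hypothetical;
 * the helpers and the EXTENT_STATUS_* flags come from extents_status.h.
 *
 *	struct extent_status es;
 *
 *	if (!ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
 *		// cache miss: consult the on-disk tree, then record it
 *		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 *				      map->m_pblk, EXTENT_STATUS_WRITTEN);
 *	}
 *	// on truncate or invalidation, drop the stale range
 *	ext4_es_remove_extent(inode, map->m_lblk, map->m_len);
 */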
static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}
/*
 * ext4_es_find_extent_range - find extent with specified status within block
 *                             range or next extent following block range in
 *                             extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 * @es - extent found, if any
 *
 * Find the first extent within the block range specified by @lblk and @end
 * in the extents status tree that satisfies @matching_fn.  If a match
 * is found, it's returned in @es.  If not, and a matching extent is found
 * beyond the block range, it's returned in @es.  If no match is found, an
 * extent is returned in @es whose es_lblk, es_len, and es_pblk components
 * are 0.
 */
static void __es_find_extent_range(struct inode *inode,
				   int (*matching_fn)(struct extent_status *es),
				   ext4_lblk_t lblk, ext4_lblk_t end,
				   struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	WARN_ON(es == NULL);
	WARN_ON(end < lblk);

	tree = &EXT4_I(inode)->i_es_tree;

	/* see if the extent has been cached */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %x\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !matching_fn(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (matching_fn(es1))
				break;
		}
	}

	if (es1 && matching_fn(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}
}

/*
 * Locking for __es_find_extent_range() for external use
 */
void ext4_es_find_extent_range(struct inode *inode,
			       int (*matching_fn)(struct extent_status *es),
			       ext4_lblk_t lblk, ext4_lblk_t end,
			       struct extent_status *es)
{
	trace_ext4_es_find_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	__es_find_extent_range(inode, matching_fn, lblk, end, es);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_extent_range_exit(inode, es);
}
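/*
 * Illustrative use (hypothetical caller; ext4_es_is_delayed() is a
 * matching predicate from extents_status.h): find the first delayed
 * extent at or after 'lblk' within [lblk, end].
 *
 *	struct extent_status es;
 *
 *	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, lblk, end, &es);
 *	if (es.es_len == 0)
 *		;	// no delayed extent found in (or beyond) the range
 */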
/*
 * __es_scan_range - search block range for block with specified status
 *                   in extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns true if at least one block in the specified block range satisfies
 * the criterion specified by @matching_fn, and false if not.  If at least
 * one extent has the specified status, then there is at least one block
 * in the range with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_range(struct inode *inode,
			    int (*matching_fn)(struct extent_status *es),
			    ext4_lblk_t start, ext4_lblk_t end)
{
	struct extent_status es;

	__es_find_extent_range(inode, matching_fn, start, end, &es);
	if (es.es_len == 0)
		return false; /* no matching extent in the tree */
	else if (es.es_lblk <= start &&
		 start < es.es_lblk + es.es_len)
		return true;
	else if (start <= es.es_lblk && es.es_lblk <= end)
		return true;
	else
		return false;
}

/*
 * Locking for __es_scan_range() for external use
 */
bool ext4_es_scan_range(struct inode *inode,
			int (*matching_fn)(struct extent_status *es),
			ext4_lblk_t lblk, ext4_lblk_t end)
{
	bool ret;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_range(inode, matching_fn, lblk, end);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

/*
 * __es_scan_clu - search cluster for block with specified status in
 *                 extents status tree
 *
 * @inode - file containing the cluster
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block in cluster to be searched
 *
 * Returns true if at least one extent in the cluster containing @lblk
 * satisfies the criterion specified by @matching_fn, and false if not.  If at
 * least one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_clu(struct inode *inode,
			  int (*matching_fn)(struct extent_status *es),
			  ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;

	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
}

/*
 * Locking for __es_scan_clu() for external use
 */
bool ext4_es_scan_clu(struct inode *inode,
		      int (*matching_fn)(struct extent_status *es),
		      ext4_lblk_t lblk)
{
	bool ret;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_clu(inode, matching_fn, lblk);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}
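/*
 * Illustrative use (hypothetical caller on a bigalloc file system):
 * test whether the cluster containing 'lblk' already holds a delayed
 * block, e.g. to decide whether a new cluster reservation is needed.
 *
 *	if (ext4_es_scan_clu(inode, &ext4_es_is_delayed, lblk))
 *		;	// cluster already has a delayed block
 */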
static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;

	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim
	 * them.
	 */
	if (!ext4_es_is_delayed(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged.
 * Conditions:
 *  - logical block numbers are contiguous
 *  - physical block numbers are contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to make sure a delayed extent is not marked unwritten */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}
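/*
 * Worked example (illustrative): a written extent [0/10) at pblk 100
 * and a written extent [10/5) at pblk 110 are mergeable: the logical
 * ranges are adjacent (0 + 10 == 10), the physical ranges are adjacent
 * (100 + 10 == 110), and the status bits match.  Breaking any one
 * condition (e.g. pblk 111 instead of 110) makes
 * ext4_es_can_be_merged() return 0.
 */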
static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {
		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be part of a whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we are not trying to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c, so
	 * we can't access the direct/indirect tree from outside it, and
	 * it would be too ugly to define this function in indirect.c.
	 */
	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG_ON(1);
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller holds i_data_sem.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);

				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG_ON(1);
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
				"delayed and written which can potentially "
				"cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
	    (status & EXTENT_STATUS_WRITTEN ||
	     status & EXTENT_STATUS_UNWRITTEN))
		__revise_pending(inode, lblk, len);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}
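/*
 * Illustrative call (hypothetical values): record a freshly mapped,
 * written range in the status tree.
 *
 *	err = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 *				    map->m_pblk, EXTENT_STATUS_WRITTEN);
 *
 * Note that the insert first removes any status already recorded for
 * [lblk, lblk + len - 1], so it also serves as an overwrite.
 */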
/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Returns 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find the extent in the cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		stats->es_stats_cache_hits++;
	} else {
		stats->es_stats_cache_misses++;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
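/*
 * Illustrative lookup (hypothetical caller): the hit/miss result also
 * feeds the es_stats_cache_{hits,misses} counters reported by the
 * es_shrinker_info seq file below.
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, lblk, &es) &&
 *	    ext4_es_is_written(&es))
 *		pblk = ext4_es_pblock(&es) + (lblk - es.es_lblk);
 */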
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err;

retry:
	err = 0;
	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			/* pblk is only meaningful for written/unwritten */
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __es_shrink(EXT4_SB(inode->i_sb),
						128, EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		goto out;
	}

	if (len1 > 0) {
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out:
	return err;
}

/*
 * ext4_es_remove_extent() removes a block range from an extent status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	return err;
}
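/*
 * Illustrative call (hypothetical caller, e.g. on truncate or punch
 * hole): drop all cached status for blocks at and beyond 'first'.
 *
 *	ext4_es_remove_extent(inode, first, EXT_MAX_BLOCKS - first);
 */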
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time * 3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk * 3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}
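/*
 * The scan-time and shrunk-object statistics above keep a 3:1
 * exponentially weighted moving average.  Worked example: with a
 * previous average of 4000 ns and a new scan taking 8000 ns, the
 * stored value becomes (8000 + 3 * 4000) / 4 = 5000 ns.
 */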
static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}

int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lu/%lu cache hits/misses\n",
		   es_stats->es_stats_cache_hits,
		   es_stats->es_stats_cache_misses);
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			   "maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
			   "  %llu us max scan time\n",
			   max->vfs_inode.i_ino, max->i_es_all_nr,
			   max->i_es_shk_nr,
			   div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	sbi->s_es_stats.es_stats_cache_hits = 0;
	sbi->s_es_stats.es_stats_cache_misses = 0;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0,
				  GFP_KERNEL);
	if (err)
		return err;

	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0,
				  GFP_KERNEL);
	if (err)
		goto err1;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker);
	if (err)
		goto err2;

	return 0;

err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	return err;
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}

/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end.  Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit the end of tree / interval, 1 if we exhausted
 * nr_to_scan.  Increment *nr_shrunk by the number of reclaimed extents.
 * Also update ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;
	node = &es->rb_node;
	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to
		 * use them.
		 */
		if (ext4_es_is_delayed(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}

static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}

#ifdef ES_DEBUG__
static void ext4_print_pending_tree(struct inode *inode)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr;

	printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_pending_tree;
	node = rb_first(&tree->root);
	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		printk(KERN_DEBUG " %u", pr->lclu);
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_print_pending_tree(inode)
#endif

int __init ext4_init_pending(void)
{
	ext4_pending_cachep = kmem_cache_create("ext4_pending_reservation",
					sizeof(struct pending_reservation),
					0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_pending_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pending(void)
{
	kmem_cache_destroy(ext4_pending_cachep);
}
  1244. void ext4_init_pending_tree(struct ext4_pending_tree *tree)
  1245. {
  1246. tree->root = RB_ROOT;
  1247. }
  1248. /*
  1249. * __get_pending - retrieve a pointer to a pending reservation
  1250. *
  1251. * @inode - file containing the pending cluster reservation
  1252. * @lclu - logical cluster of interest
  1253. *
  1254. * Returns a pointer to a pending reservation if it's a member of
  1255. * the set, and NULL if not. Must be called holding i_es_lock.
  1256. */
  1257. static struct pending_reservation *__get_pending(struct inode *inode,
  1258. ext4_lblk_t lclu)
  1259. {
  1260. struct ext4_pending_tree *tree;
  1261. struct rb_node *node;
  1262. struct pending_reservation *pr = NULL;
  1263. tree = &EXT4_I(inode)->i_pending_tree;
  1264. node = (&tree->root)->rb_node;
  1265. while (node) {
  1266. pr = rb_entry(node, struct pending_reservation, rb_node);
  1267. if (lclu < pr->lclu)
  1268. node = node->rb_left;
  1269. else if (lclu > pr->lclu)
  1270. node = node->rb_right;
  1271. else if (lclu == pr->lclu)
  1272. return pr;
  1273. }
  1274. return NULL;
  1275. }
  1276. /*
  1277. * __insert_pending - adds a pending cluster reservation to the set of
  1278. * pending reservations
  1279. *
  1280. * @inode - file containing the cluster
  1281. * @lblk - logical block in the cluster to be added
  1282. *
  1283. * Returns 0 on successful insertion and -ENOMEM on failure. If the
  1284. * pending reservation is already in the set, returns successfully.
  1285. */
  1286. static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
  1287. {
  1288. struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  1289. struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
  1290. struct rb_node **p = &tree->root.rb_node;
  1291. struct rb_node *parent = NULL;
  1292. struct pending_reservation *pr;
  1293. ext4_lblk_t lclu;
  1294. int ret = 0;
  1295. lclu = EXT4_B2C(sbi, lblk);
  1296. /* search to find parent for insertion */
  1297. while (*p) {
  1298. parent = *p;
  1299. pr = rb_entry(parent, struct pending_reservation, rb_node);
  1300. if (lclu < pr->lclu) {
  1301. p = &(*p)->rb_left;
  1302. } else if (lclu > pr->lclu) {
  1303. p = &(*p)->rb_right;
  1304. } else {
  1305. /* pending reservation already inserted */
  1306. goto out;
  1307. }
  1308. }
  1309. pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
  1310. if (pr == NULL) {
  1311. ret = -ENOMEM;
  1312. goto out;
  1313. }
  1314. pr->lclu = lclu;
  1315. rb_link_node(&pr->rb_node, parent, p);
  1316. rb_insert_color(&pr->rb_node, &tree->root);
  1317. out:
  1318. return ret;
  1319. }
/*
 * __remove_pending - removes a pending cluster reservation from the set
 *                    of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Returns successfully if pending reservation is not a member of the set.
 */
static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree;

	pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
	if (pr != NULL) {
		tree = &EXT4_I(inode)->i_pending_tree;
		rb_erase(&pr->rb_node, &tree->root);
		kmem_cache_free(ext4_pending_cachep, pr);
	}
}

/*
 * ext4_remove_pending - removes a pending cluster reservation from the set
 *                       of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Locking for external use of __remove_pending.
 */
void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	write_lock(&ei->i_es_lock);
	__remove_pending(inode, lblk);
	write_unlock(&ei->i_es_lock);
}

/*
 * ext4_is_pending - determine whether a cluster has a pending reservation
 *                   on it
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster
 *
 * Returns true if there's a pending reservation for the cluster in the
 * set of pending reservations, and false if not.
 */
bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	bool ret;

	read_lock(&ei->i_es_lock);
	ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
	read_unlock(&ei->i_es_lock);

	return ret;
}
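/*
 * The two wrappers above follow this file's locking convention: the
 * double-underscore helpers assume i_es_lock is already held, while the
 * public entry points take it (write side for mutation, read side for
 * lookup).  A minimal userspace analogue of that split, using a pthread
 * rwlock and hypothetical __lookup()/lookup() names:
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t set_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool member;			/* stand-in for the pending tree */

static bool __lookup(void)		/* caller must hold set_lock */
{
	return member;
}

bool lookup(void)			/* locking for external use */
{
	bool ret;

	pthread_rwlock_rdlock(&set_lock);
	ret = __lookup();
	pthread_rwlock_unlock(&set_lock);
	return ret;
}
#endif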
/*
 * ext4_es_insert_delayed_block - adds a delayed block to the extents status
 *                                tree, adding a pending reservation where
 *                                needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 * @allocated - indicates whether a physical cluster has been allocated for
 *              the logical cluster that contains the block
 *
 * Returns 0 on success, negative error code on failure.
 */
int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
				 bool allocated)
{
	struct extent_status newes;
	int err = 0;

	es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
		 lblk, inode->i_ino);

	newes.es_lblk = lblk;
	newes.es_len = 1;
	ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
	trace_ext4_es_insert_delayed_block(inode, &newes, allocated);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);

	err = __es_remove_extent(inode, lblk, lblk);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err != 0)
		goto error;

	if (allocated)
		__insert_pending(inode, lblk);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);
	ext4_print_pending_tree(inode);

	return err;
}
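/*
 * The retry loop above reclaims cached entries on -ENOMEM and attempts the
 * insert again, giving up only once the shrinker can free nothing more.  A
 * compact, self-contained userspace sketch of that retry-after-reclaim
 * pattern; try_insert() and shrink_cache() are hypothetical stand-ins for
 * __es_insert_extent() and __es_shrink():
 */
#if 0
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int attempts;

static int try_insert(void)		/* fails once, then succeeds */
{
	return attempts++ ? 0 : -ENOMEM;
}

static bool shrink_cache(int nr)	/* pretend reclaim freed something */
{
	return true;
}

int main(void)
{
	int err;

retry:
	err = try_insert();
	if (err == -ENOMEM && shrink_cache(128))
		goto retry;		/* reclaim made progress: try again */
	printf("err = %d after %d attempt(s)\n", err, attempts);
	return 0;
}
#endif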
/*
 * __es_delayed_clu - count number of clusters containing blocks that
 *                    are delayed only
 *
 * @inode - file containing block range
 * @start - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns the number of clusters containing only delayed (not delayed
 * and unwritten) blocks in the range specified by @start and @end.  Any
 * cluster or part of a cluster within the range and containing a delayed
 * and not unwritten block within the range is counted as a whole cluster.
 */
static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
				     ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	unsigned long long last_counted_lclu;
	unsigned int n = 0;

	/* guaranteed to be unequal to any ext4_lblk_t value */
	last_counted_lclu = ~0ULL;

	es = __es_tree_search(&tree->root, start);

	while (es && (es->es_lblk <= end)) {
		if (ext4_es_is_delonly(es)) {
			if (es->es_lblk <= start)
				first_lclu = EXT4_B2C(sbi, start);
			else
				first_lclu = EXT4_B2C(sbi, es->es_lblk);

			if (ext4_es_end(es) >= end)
				last_lclu = EXT4_B2C(sbi, end);
			else
				last_lclu = EXT4_B2C(sbi, ext4_es_end(es));

			if (first_lclu == last_counted_lclu)
				n += last_lclu - first_lclu;
			else
				n += last_lclu - first_lclu + 1;

			last_counted_lclu = last_lclu;
		}
		node = rb_next(&es->rb_node);
		if (!node)
			break;
		es = rb_entry(node, struct extent_status, rb_node);
	}

	return n;
}
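/*
 * A worked example of the last_counted_lclu bookkeeping above, assuming
 * 16 blocks per cluster: delayed-only extents [10, 20] and [25, 40] both
 * touch cluster 1 (blocks 16..31).  The first extent counts clusters 0
 * and 1; the second starts in the cluster just counted, so the "+ 1" is
 * dropped and only cluster 2 is new.  Runnable userspace model:
 */
#if 0
#include <stdio.h>

#define B2C(lblk)	((lblk) >> 4)	/* 16 blocks/cluster (assumed) */

int main(void)
{
	/* two delayed-only extents sharing cluster 1 */
	unsigned int ext[2][2] = { { 10, 20 }, { 25, 40 } };
	unsigned long long last_counted = ~0ULL;
	unsigned int n = 0;

	for (int i = 0; i < 2; i++) {
		unsigned int first = B2C(ext[i][0]);
		unsigned int last = B2C(ext[i][1]);

		if (first == last_counted)
			n += last - first;	/* skip the shared cluster */
		else
			n += last - first + 1;
		last_counted = last;
	}
	printf("%u clusters\n", n);	/* 3, not 4: cluster 1 counted once */
	return 0;
}
#endif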
/*
 * ext4_es_delayed_clu - count number of clusters containing blocks that
 *                       are delayed only (delayed and not unwritten)
 *
 * @inode - file containing block range
 * @lblk - logical block defining start of range
 * @len - number of blocks in range
 *
 * Locking for external use of __es_delayed_clu().
 */
unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
				 ext4_lblk_t len)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_lblk_t end;
	unsigned int n;

	if (len == 0)
		return 0;

	end = lblk + len - 1;
	WARN_ON(end < lblk);	/* range wrapped the logical block space */

	read_lock(&ei->i_es_lock);
	n = __es_delayed_clu(inode, lblk, end);
	read_unlock(&ei->i_es_lock);

	return n;
}
/*
 * __revise_pending - makes, cancels, or leaves unchanged pending cluster
 *                    reservations for a specified block range depending
 *                    upon the presence or absence of delayed blocks
 *                    outside the range within clusters at the ends of the
 *                    range
 *
 * @inode - file containing the range
 * @lblk - logical block defining the start of range
 * @len - length of range in blocks
 *
 * Used after a newly allocated extent is added to the extents status tree.
 * Requires that the extents in the range have either written or unwritten
 * status.  Must be called while holding i_es_lock.
 */
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t end = lblk + len - 1;
	ext4_lblk_t first, last;
	bool f_del = false, l_del = false;

	if (len == 0)
		return;

	/*
	 * Two cases - block range within single cluster and block range
	 * spanning two or more clusters.  Note that a cluster belonging
	 * to a range starting and/or ending on a cluster boundary is treated
	 * as if it does not contain a delayed extent.  The new range may
	 * have allocated space for previously delayed blocks out to the
	 * cluster boundary, requiring that any pre-existing pending
	 * reservation be canceled.  Because this code only looks at blocks
	 * outside the range, it should revise pending reservations
	 * correctly even if the extent represented by the range can't be
	 * inserted in the extents status tree due to ENOSPC.
	 */

	if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del) {
			__insert_pending(inode, first);
		} else {
			last = EXT4_LBLK_CMASK(sbi, end) +
			       sbi->s_cluster_ratio - 1;
			if (last != end)
				l_del = __es_scan_range(inode,
							&ext4_es_is_delonly,
							end + 1, last);
			if (l_del)
				__insert_pending(inode, last);
			else
				__remove_pending(inode, last);
		}
	} else {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del)
			__insert_pending(inode, first);
		else
			__remove_pending(inode, first);

		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
		if (last != end)
			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
						end + 1, last);
		if (l_del)
			__insert_pending(inode, last);
		else
			__remove_pending(inode, last);
	}
}
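/*
 * The boundary arithmetic above, modeled in userspace with an assumed
 * cluster ratio of 16: EXT4_LBLK_CMASK() rounds a block down to the start
 * of its cluster, and CMASK(end) + ratio - 1 gives the last block of end's
 * cluster.  The delonly scans then cover only the partial-cluster tails,
 * [first, lblk - 1] and [end + 1, last].  The CMASK() macro here is an
 * illustrative stand-in:
 */
#if 0
#include <stdio.h>

#define RATIO		16U			/* assumed cluster ratio */
#define CMASK(b)	((b) & ~(RATIO - 1))	/* mirrors EXT4_LBLK_CMASK() */

int main(void)
{
	unsigned int lblk = 20, end = 43;	/* range spans clusters 1..2 */
	unsigned int first = CMASK(lblk);
	unsigned int last = CMASK(end) + RATIO - 1;

	printf("scan [%u, %u] before and [%u, %u] after the range\n",
	       first, lblk - 1, end + 1, last);	/* [16, 19] and [44, 47] */
	return 0;
}
#endif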
/*
 * ext4_es_remove_blks - remove block range from extents status tree and
 *                       reduce reservation count or cancel pending
 *                       reservation as needed
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @len - number of blocks to remove
 */
void ext4_es_remove_blks(struct inode *inode, ext4_lblk_t lblk,
			 ext4_lblk_t len)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned int clu_size, reserved = 0;
	ext4_lblk_t last_lclu, first, length, remainder, last;
	bool delonly;
	int err = 0;
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree;

	/*
	 * Process cluster by cluster for bigalloc - there may be up to
	 * two clusters in a 4k page with a 1k block size and two blocks
	 * per cluster.  Also necessary for systems with larger page sizes
	 * and potentially larger block sizes.
	 */
	clu_size = sbi->s_cluster_ratio;
	last_lclu = EXT4_B2C(sbi, lblk + len - 1);

	write_lock(&EXT4_I(inode)->i_es_lock);

	for (first = lblk, remainder = len;
	     remainder > 0;
	     first += length, remainder -= length) {

		if (EXT4_B2C(sbi, first) == last_lclu)
			length = remainder;
		else
			length = clu_size - EXT4_LBLK_COFF(sbi, first);

		/*
		 * The BH_Delay flag, which triggers calls to this function,
		 * and the contents of the extents status tree can be
		 * inconsistent due to writepages activity.  So, note whether
		 * the blocks to be removed actually belong to an extent with
		 * delayed only status.
		 */
		delonly = __es_scan_clu(inode, &ext4_es_is_delonly, first);

		/*
		 * because of the writepages effect, written and unwritten
		 * blocks could be removed here
		 */
		last = first + length - 1;
		err = __es_remove_extent(inode, first, last);
		if (err)
			ext4_warning(inode->i_sb,
				     "%s: couldn't remove page (err = %d)",
				     __func__, err);

		/* non-bigalloc case: simply count the cluster for release */
		if (sbi->s_cluster_ratio == 1 && delonly) {
			reserved++;
			continue;
		}

		/*
		 * bigalloc case: if all delayed allocated only blocks have
		 * just been removed from a cluster, either cancel a pending
		 * reservation if it exists or count a cluster for release
		 */
		if (delonly &&
		    !__es_scan_clu(inode, &ext4_es_is_delonly, first)) {
			pr = __get_pending(inode, EXT4_B2C(sbi, first));
			if (pr != NULL) {
				tree = &EXT4_I(inode)->i_pending_tree;
				rb_erase(&pr->rb_node, &tree->root);
				kmem_cache_free(ext4_pending_cachep, pr);
			} else {
				reserved++;
			}
		}
	}

	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_da_release_space(inode, reserved);
}
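/*
 * The loop above chops [lblk, lblk + len - 1] into cluster-aligned chunks:
 * each step runs to the end of the current cluster, except the final one,
 * which takes whatever remains.  A runnable userspace model with an
 * assumed cluster ratio of 4; B2C() and COFF() are illustrative stand-ins
 * for EXT4_B2C() and EXT4_LBLK_COFF():
 */
#if 0
#include <stdio.h>

#define RATIO	4U			/* assumed cluster ratio */
#define B2C(b)	((b) / RATIO)		/* mirrors EXT4_B2C() */
#define COFF(b)	((b) % RATIO)		/* mirrors EXT4_LBLK_COFF() */

int main(void)
{
	unsigned int lblk = 6, len = 9;	/* blocks 6..14, clusters 1..3 */
	unsigned int last_lclu = B2C(lblk + len - 1);
	unsigned int first, remainder, length;

	for (first = lblk, remainder = len;
	     remainder > 0;
	     first += length, remainder -= length) {
		if (B2C(first) == last_lclu)
			length = remainder;	/* final, partial chunk */
		else
			length = RATIO - COFF(first);
		printf("chunk [%u, %u]\n", first, first + length - 1);
	}
	/* prints [6, 7], [8, 11], [12, 14] */
	return 0;
}
#endif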