@@ -298,7 +298,7 @@ out:
 	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
 }
 
-void ext4_es_list_add(struct inode *inode)
+static void ext4_es_list_add(struct inode *inode)
 {
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -314,7 +314,7 @@ void ext4_es_list_add(struct inode *inode)
 	spin_unlock(&sbi->s_es_lock);
 }
 
-void ext4_es_list_del(struct inode *inode)
+static void ext4_es_list_del(struct inode *inode)
 {
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -344,7 +344,8 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
 	 * We don't count delayed extent because we never try to reclaim them
 	 */
 	if (!ext4_es_is_delayed(es)) {
-		EXT4_I(inode)->i_es_shk_nr++;
+		if (!EXT4_I(inode)->i_es_shk_nr++)
+			ext4_es_list_add(inode);
 		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
 					s_es_stats.es_stats_shk_cnt);
 	}
@@ -363,7 +364,8 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
 	/* Decrease the shrink counter when this es is not delayed */
 	if (!ext4_es_is_delayed(es)) {
 		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
-		EXT4_I(inode)->i_es_shk_nr--;
+		if (!--EXT4_I(inode)->i_es_shk_nr)
+			ext4_es_list_del(inode);
 		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
 					s_es_stats.es_stats_shk_cnt);
 	}
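The two hunks in ext4_es_alloc_extent() and ext4_es_free_extent() lean on the
same counting idiom: the post-increment test `if (!EXT4_I(inode)->i_es_shk_nr++)`
fires only on the 0 -> 1 transition, and the pre-decrement test
`if (!--EXT4_I(inode)->i_es_shk_nr)` fires only on the 1 -> 0 transition, so an
inode joins the shrinker's list exactly when it gains its first reclaimable
(non-delayed) extent and leaves it when the last one is freed. Since both
helpers are now called only from within this file, the -/+ pairs at the top of
the diff also make them static. Below is a minimal user-space sketch of that
transition pattern; the toy_* names and the on_list flag are illustrative
stand-ins, not ext4's actual structures (the kernel uses a list_head guarded by
sbi->s_es_lock):

#include <stdio.h>

struct toy_inode {
	unsigned int shk_nr;	/* count of reclaimable extents */
	int on_list;		/* stand-in for list_head membership */
};

static void toy_list_add(struct toy_inode *ti) { ti->on_list = 1; }
static void toy_list_del(struct toy_inode *ti) { ti->on_list = 0; }

/* Mirror of the alloc-side hunk: join on the 0 -> 1 transition. */
static void toy_alloc_extent(struct toy_inode *ti)
{
	if (!ti->shk_nr++)
		toy_list_add(ti);
}

/* Mirror of the free-side hunk: leave on the 1 -> 0 transition. */
static void toy_free_extent(struct toy_inode *ti)
{
	if (!--ti->shk_nr)
		toy_list_del(ti);
}

int main(void)
{
	struct toy_inode ti = { 0, 0 };

	toy_alloc_extent(&ti);	/* first extent: joins the list */
	toy_alloc_extent(&ti);	/* second extent: already on the list */
	printf("on_list=%d nr=%u\n", ti.on_list, ti.shk_nr);	/* 1 2 */

	toy_free_extent(&ti);
	toy_free_extent(&ti);	/* last extent gone: leaves the list */
	printf("on_list=%d nr=%u\n", ti.on_list, ti.shk_nr);	/* 0 0 */
	return 0;
}

The payoff of the idiom is that list membership is maintained in O(1) at the
points where the counter already changes, rather than being recomputed on every
shrinker scan.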