@@ -158,9 +158,6 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
 	struct nat_entry_set *head;
 
-	if (get_nat_flag(ne, IS_DIRTY))
-		return;
-
 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
 	if (!head) {
 		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
@@ -171,10 +168,18 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 		head->entry_cnt = 0;
 		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
 	}
-	list_move_tail(&ne->list, &head->entry_list);
+
+	if (get_nat_flag(ne, IS_DIRTY))
+		goto refresh_list;
+
 	nm_i->dirty_nat_cnt++;
 	head->entry_cnt++;
 	set_nat_flag(ne, IS_DIRTY, true);
+refresh_list:
+	if (nat_get_blkaddr(ne) == NEW_ADDR)
+		list_del_init(&ne->list);
+	else
+		list_move_tail(&ne->list, &head->entry_list);
 }
 
 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
@@ -2423,8 +2428,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 		nid_t nid = nat_get_nid(ne);
 		int offset;
 
-		if (nat_get_blkaddr(ne) == NEW_ADDR)
-			continue;
+		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
 
 		if (to_journal) {
 			offset = lookup_journal_in_cursum(journal,