@@ -1765,7 +1765,8 @@ static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
 	radix_tree_delete(&nm_i->free_nid_root, i->nid);
 }
 
-static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
+/* return if the nid is recognized as free */
+static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i;
@@ -1774,14 +1775,14 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 
 	/* 0 nid should not be used */
 	if (unlikely(nid == 0))
-		return 0;
+		return false;
 
 	if (build) {
 		/* do not add allocated nids */
 		ne = __lookup_nat_cache(nm_i, nid);
 		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
 				nat_get_blkaddr(ne) != NULL_ADDR))
-			return 0;
+			return false;
 	}
 
 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
@@ -1790,7 +1791,7 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 
 	if (radix_tree_preload(GFP_NOFS)) {
 		kmem_cache_free(free_nid_slab, i);
-		return 0;
+		return true;
 	}
 
 	spin_lock(&nm_i->nid_list_lock);
@@ -1799,9 +1800,9 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 	radix_tree_preload_end();
 	if (err) {
 		kmem_cache_free(free_nid_slab, i);
-		return 0;
+		return true;
 	}
-	return 1;
+	return true;
 }
 
 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
@@ -1822,17 +1823,36 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
 		kmem_cache_free(free_nid_slab, i);
 }
 
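+/* cache the free state of nid, but only for NAT blocks already scanned */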
+void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
+	unsigned int nid_ofs = nid - START_NID(nid);
+
+	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+		return;
+
+	if (set)
+		set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+	else
+		clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+}
+
 static void scan_nat_page(struct f2fs_sb_info *sbi,
 			struct page *nat_page, nid_t start_nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
 	block_t blk_addr;
+	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
 	int i;
 
+	set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
+
 	i = start_nid % NAT_ENTRY_PER_BLOCK;
 
 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
+		bool freed = false;
 
 		if (unlikely(start_nid >= nm_i->max_nid))
 			break;
@@ -1840,8 +1860,52 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
 		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
 		if (blk_addr == NULL_ADDR)
-			add_free_nid(sbi, start_nid, true);
+			freed = add_free_nid(sbi, start_nid, true);
+		update_free_nid_bitmap(sbi, start_nid, freed);
+	}
+}
+
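+/* refill the free nid list from free_nid_bitmap and the NAT journal */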
+static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+	struct f2fs_journal *journal = curseg->journal;
+	unsigned int i, idx;
+	unsigned int target = FREE_NID_PAGES * NAT_ENTRY_PER_BLOCK;
+
+	down_read(&nm_i->nat_tree_lock);
+
+	for (i = 0; i < nm_i->nat_blocks; i++) {
+		if (!test_bit_le(i, nm_i->nat_block_bitmap))
+			continue;
+		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
+			nid_t nid;
+
+			if (!test_bit_le(idx, nm_i->free_nid_bitmap[i]))
+				continue;
+
+			nid = i * NAT_ENTRY_PER_BLOCK + idx;
+			add_free_nid(sbi, nid, true);
+
+			if (nm_i->nid_cnt[FREE_NID_LIST] >= target)
+				goto out;
+		}
+	}
+out:
+	down_read(&curseg->journal_rwsem);
+	for (i = 0; i < nats_in_cursum(journal); i++) {
+		block_t addr;
+		nid_t nid;
+
+		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
+		nid = le32_to_cpu(nid_in_journal(journal, i));
+		if (addr == NULL_ADDR)
+			add_free_nid(sbi, nid, true);
+		else
+			remove_free_nid(sbi, nid);
 	}
+	up_read(&curseg->journal_rwsem);
+	up_read(&nm_i->nat_tree_lock);
 }
 
 static int scan_nat_bits(struct f2fs_sb_info *sbi)
@@ -1912,9 +1976,17 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 	if (!sync && !available_free_memory(sbi, FREE_NIDS))
 		return;
 
-	/* try to find free nids with nat_bits */
-	if (!mount && !scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
-		return;
+	if (!mount) {
+		/* try to find free nids in free_nid_bitmap */
+		scan_free_nid_bits(sbi);
+
+		if (nm_i->nid_cnt[FREE_NID_LIST])
+			return;
+
+		/* try to find free nids with nat_bits */
+		if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
+			return;
+	}
 
 	/* find next valid candidate */
 	if (enabled_nat_bits(sbi, NULL)) {
@@ -2010,6 +2082,9 @@ retry:
 		i->state = NID_ALLOC;
 		__insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
 		nm_i->available_nids--;
+
+		update_free_nid_bitmap(sbi, *nid, false);
+
 		spin_unlock(&nm_i->nid_list_lock);
 		return true;
 	}
@@ -2064,6 +2139,8 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 
 	nm_i->available_nids++;
 
+	update_free_nid_bitmap(sbi, nid, true);
+
 	spin_unlock(&nm_i->nid_list_lock);
 
 	if (need_free)
@@ -2392,6 +2469,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 			add_free_nid(sbi, nid, false);
 			spin_lock(&NM_I(sbi)->nid_list_lock);
 			NM_I(sbi)->available_nids++;
+			update_free_nid_bitmap(sbi, nid, true);
+			spin_unlock(&NM_I(sbi)->nid_list_lock);
+		} else {
+			spin_lock(&NM_I(sbi)->nid_list_lock);
+			update_free_nid_bitmap(sbi, nid, false);
 			spin_unlock(&NM_I(sbi)->nid_list_lock);
 		}
 	}
@@ -2558,6 +2640,22 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 	return 0;
 }
 
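+/* allocate free_nid_bitmap and nat_block_bitmap for the free nid cache */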
+int init_free_nid_cache(struct f2fs_sb_info *sbi)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+	nm_i->free_nid_bitmap = f2fs_kvzalloc(nm_i->nat_blocks *
+					NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
+	if (!nm_i->free_nid_bitmap)
+		return -ENOMEM;
+
+	nm_i->nat_block_bitmap = f2fs_kvzalloc(nm_i->nat_blocks / 8,
+								GFP_KERNEL);
+	if (!nm_i->nat_block_bitmap)
+		return -ENOMEM;
+	return 0;
+}
+
 int build_node_manager(struct f2fs_sb_info *sbi)
 {
 	int err;
@@ -2570,6 +2668,10 @@ int build_node_manager(struct f2fs_sb_info *sbi)
 	if (err)
 		return err;
 
+	err = init_free_nid_cache(sbi);
+	if (err)
+		return err;
+
 	build_free_nids(sbi, true, true);
 	return 0;
 }
@@ -2628,6 +2730,9 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 	}
 	up_write(&nm_i->nat_tree_lock);
 
+	kvfree(nm_i->nat_block_bitmap);
+	kvfree(nm_i->free_nid_bitmap);
+
 	kfree(nm_i->nat_bitmap);
 	kfree(nm_i->nat_bits);
 #ifdef CONFIG_F2FS_CHECK_FS