@@ -45,8 +45,8 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 	 * give 25%, 25%, 50%, 50%, 50% memory for each components respectively
 	 */
 	if (type == FREE_NIDS) {
-		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
-							PAGE_SHIFT;
+		mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
+				sizeof(struct free_nid)) >> PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 	} else if (type == NAT_ENTRIES) {
 		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
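
Note: the hunks in this patch rely on the f2fs.h side of the change, which is not included in this excerpt. Presumably the header replaces the old free_nid_list/fcnt/free_nid_list_lock fields of struct f2fs_nm_info with a small enum plus per-list arrays, along these lines (a sketch of the assumed declarations, not the actual header):

	enum nid_list {
		FREE_NID_LIST,	/* free nids, in state NID_NEW */
		ALLOC_NID_LIST,	/* in-flight nids, in state NID_ALLOC */
		MAX_NID_LIST,
	};

	struct f2fs_nm_info {
		/* ... */
		struct radix_tree_root free_nid_root;	/* unchanged */
		struct list_head nid_list[MAX_NID_LIST];/* was: free_nid_list */
		unsigned int nid_cnt[MAX_NID_LIST];	/* was: fcnt */
		spinlock_t nid_list_lock;		/* was: free_nid_list_lock */
		/* ... */
	};
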
@@ -1698,10 +1698,31 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
 static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
 						struct free_nid *i)
 {
-	list_del(&i->list);
 	radix_tree_delete(&nm_i->free_nid_root, i->nid);
 }
 
+static void __insert_nid_to_list(struct f2fs_sb_info *sbi,
+			struct free_nid *i, enum nid_list list)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+	f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+						i->state != NID_ALLOC);
+	nm_i->nid_cnt[list]++;
+	list_add_tail(&i->list, &nm_i->nid_list[list]);
+}
+
+static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
+			struct free_nid *i, enum nid_list list)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+	f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+						i->state != NID_ALLOC);
+	nm_i->nid_cnt[list]--;
+	list_del(&i->list);
+}
+
 static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1732,33 +1753,33 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 		return 0;
 	}
 
-	spin_lock(&nm_i->free_nid_list_lock);
+	spin_lock(&nm_i->nid_list_lock);
 	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
-		spin_unlock(&nm_i->free_nid_list_lock);
+		spin_unlock(&nm_i->nid_list_lock);
 		radix_tree_preload_end();
 		kmem_cache_free(free_nid_slab, i);
 		return 0;
 	}
-	list_add_tail(&i->list, &nm_i->free_nid_list);
-	nm_i->fcnt++;
-	spin_unlock(&nm_i->free_nid_list_lock);
+	__insert_nid_to_list(sbi, i, FREE_NID_LIST);
+	spin_unlock(&nm_i->nid_list_lock);
 	radix_tree_preload_end();
 	return 1;
 }
 
-static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
 {
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i;
 	bool need_free = false;
 
-	spin_lock(&nm_i->free_nid_list_lock);
+	spin_lock(&nm_i->nid_list_lock);
 	i = __lookup_free_nid_list(nm_i, nid);
 	if (i && i->state == NID_NEW) {
+		__remove_nid_from_list(sbi, i, FREE_NID_LIST);
 		__del_from_free_nid_list(nm_i, i);
-		nm_i->fcnt--;
 		need_free = true;
 	}
-	spin_unlock(&nm_i->free_nid_list_lock);
+	spin_unlock(&nm_i->nid_list_lock);
 
 	if (need_free)
 		kmem_cache_free(free_nid_slab, i);
@@ -1797,7 +1818,7 @@ void __build_free_nids(struct f2fs_sb_info *sbi)
 	nid_t nid = nm_i->next_scan_nid;
 
 	/* Enough entries */
-	if (nm_i->fcnt >= NAT_ENTRY_PER_BLOCK)
+	if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
 		return;
 
 	/* readahead nat pages to be scanned */
@@ -1833,7 +1854,7 @@ void __build_free_nids(struct f2fs_sb_info *sbi)
 		if (addr == NULL_ADDR)
 			add_free_nid(sbi, nid, true);
 		else
-			remove_free_nid(nm_i, nid);
+			remove_free_nid(sbi, nid);
 	}
 	up_read(&curseg->journal_rwsem);
 	up_read(&nm_i->nat_tree_lock);
@@ -1866,23 +1887,22 @@ retry:
 	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
 		return false;
 
-	spin_lock(&nm_i->free_nid_list_lock);
+	spin_lock(&nm_i->nid_list_lock);
 
 	/* We should not use stale free nids created by build_free_nids */
-	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
-		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
-		list_for_each_entry(i, &nm_i->free_nid_list, list)
-			if (i->state == NID_NEW)
-				break;
-
-		f2fs_bug_on(sbi, i->state != NID_NEW);
+	if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
+		f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
+		i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
+					struct free_nid, list);
 		*nid = i->nid;
+
+		__remove_nid_from_list(sbi, i, FREE_NID_LIST);
 		i->state = NID_ALLOC;
-		nm_i->fcnt--;
-		spin_unlock(&nm_i->free_nid_list_lock);
+		__insert_nid_to_list(sbi, i, ALLOC_NID_LIST);
+		spin_unlock(&nm_i->nid_list_lock);
 		return true;
 	}
-	spin_unlock(&nm_i->free_nid_list_lock);
+	spin_unlock(&nm_i->nid_list_lock);
 
 	/* Let's scan nat pages and its caches to get free nids */
 	build_free_nids(sbi);
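
With the free list now guaranteed to hold only NID_NEW entries, alloc_nid() no longer has to scan for a usable entry: list_first_entry() suffices, and the chosen nid migrates to ALLOC_NID_LIST instead of lingering on a mixed list. The state flip sits between the remove and the insert because each helper asserts the state that matches its list. A minimal userspace model of that lifecycle (hypothetical names, no kernel dependencies; a sketch of the scheme, not the driver code):

	#include <assert.h>
	#include <stdio.h>
	#include <stdlib.h>

	enum nid_state { NID_NEW, NID_ALLOC };
	enum nid_list { FREE_NID_LIST, ALLOC_NID_LIST, MAX_NID_LIST };

	struct free_nid {
		unsigned int nid;
		enum nid_state state;
		struct free_nid *next;	/* singly linked is enough for a model */
	};

	static struct free_nid *nid_list[MAX_NID_LIST];
	static unsigned int nid_cnt[MAX_NID_LIST];

	/* Mirrors __insert_nid_to_list(): state must match the target list. */
	static void insert_nid(struct free_nid *i, enum nid_list list)
	{
		assert(list == FREE_NID_LIST ? i->state == NID_NEW
					     : i->state == NID_ALLOC);
		i->next = nid_list[list];
		nid_list[list] = i;
		nid_cnt[list]++;
	}

	/*
	 * Mirrors __remove_nid_from_list(); the kernel helper also asserts
	 * the state, which is why alloc_nid() flips i->state only between
	 * the remove and the insert.
	 */
	static struct free_nid *remove_first(enum nid_list list)
	{
		struct free_nid *i = nid_list[list];

		assert(i != NULL);	/* caller checked nid_cnt[list] first */
		nid_list[list] = i->next;
		nid_cnt[list]--;
		return i;
	}

	int main(void)
	{
		struct free_nid *i = malloc(sizeof(*i));

		i->nid = 42;
		i->state = NID_NEW;
		insert_nid(i, FREE_NID_LIST);		/* add_free_nid() path */

		i = remove_first(FREE_NID_LIST);	/* alloc_nid() path */
		i->state = NID_ALLOC;
		insert_nid(i, ALLOC_NID_LIST);

		i = remove_first(ALLOC_NID_LIST);	/* alloc_nid_done() path */
		free(i);

		printf("free=%u alloc=%u\n",
			nid_cnt[FREE_NID_LIST], nid_cnt[ALLOC_NID_LIST]);
		return 0;
	}
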
@@ -1897,11 +1917,12 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i;
 
-	spin_lock(&nm_i->free_nid_list_lock);
+	spin_lock(&nm_i->nid_list_lock);
 	i = __lookup_free_nid_list(nm_i, nid);
-	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
+	f2fs_bug_on(sbi, !i);
+	__remove_nid_from_list(sbi, i, ALLOC_NID_LIST);
 	__del_from_free_nid_list(nm_i, i);
-	spin_unlock(&nm_i->free_nid_list_lock);
+	spin_unlock(&nm_i->nid_list_lock);
 
 	kmem_cache_free(free_nid_slab, i);
 }
@@ -1918,17 +1939,20 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 	if (!nid)
 		return;
 
-	spin_lock(&nm_i->free_nid_list_lock);
+	spin_lock(&nm_i->nid_list_lock);
 	i = __lookup_free_nid_list(nm_i, nid);
-	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
+	f2fs_bug_on(sbi, !i);
+
+	__remove_nid_from_list(sbi, i, ALLOC_NID_LIST);
+
 	if (!available_free_memory(sbi, FREE_NIDS)) {
 		__del_from_free_nid_list(nm_i, i);
 		need_free = true;
 	} else {
 		i->state = NID_NEW;
-		nm_i->fcnt++;
+		__insert_nid_to_list(sbi, i, FREE_NID_LIST);
 	}
-	spin_unlock(&nm_i->free_nid_list_lock);
+	spin_unlock(&nm_i->nid_list_lock);
 
 	if (need_free)
 		kmem_cache_free(free_nid_slab, i);
@@ -1940,24 +1964,26 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
 	struct free_nid *i, *next;
 	int nr = nr_shrink;
 
-	if (nm_i->fcnt <= MAX_FREE_NIDS)
+	if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
 		return 0;
 
 	if (!mutex_trylock(&nm_i->build_lock))
 		return 0;
 
-	spin_lock(&nm_i->free_nid_list_lock);
-	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
-		if (nr_shrink <= 0 || nm_i->fcnt <= MAX_FREE_NIDS)
+	spin_lock(&nm_i->nid_list_lock);
+	list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
+								list) {
+		if (nr_shrink <= 0 ||
+			nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
 			break;
-		if (i->state == NID_ALLOC)
-			continue;
+
+		__remove_nid_from_list(sbi, i, FREE_NID_LIST);
 		__del_from_free_nid_list(nm_i, i);
+
 		kmem_cache_free(free_nid_slab, i);
-		nm_i->fcnt--;
 		nr_shrink--;
 	}
-	spin_unlock(&nm_i->free_nid_list_lock);
+	spin_unlock(&nm_i->nid_list_lock);
 	mutex_unlock(&nm_i->build_lock);
 
 	return nr - nr_shrink;
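
Note the dropped NID_ALLOC guard in the shrinker loop above: try_to_free_nids() now walks FREE_NID_LIST, which by construction never carries allocated entries, so the old "if (i->state == NID_ALLOC) continue;" check has no remaining purpose and every visited entry is reclaimable.
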
@@ -2013,7 +2039,7 @@ recover_xnid:
 	if (unlikely(!inc_valid_node_count(sbi, inode)))
 		f2fs_bug_on(sbi, 1);
 
-	remove_free_nid(NM_I(sbi), new_xnid);
+	remove_free_nid(sbi, new_xnid);
 	get_node_info(sbi, new_xnid, &ni);
 	ni.ino = inode->i_ino;
 	set_node_addr(sbi, &ni, NEW_ADDR, false);
@@ -2043,7 +2069,7 @@ retry:
 	}
 
 	/* Should not use this inode from free nid list */
-	remove_free_nid(NM_I(sbi), ino);
+	remove_free_nid(sbi, ino);
 
 	if (!PageUptodate(ipage))
 		SetPageUptodate(ipage);
@@ -2277,20 +2303,22 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 
 	/* not used nids: 0, node, meta, (and root counted as valid node) */
 	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
-	nm_i->fcnt = 0;
+	nm_i->nid_cnt[FREE_NID_LIST] = 0;
+	nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
 	nm_i->nat_cnt = 0;
 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
 
 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
-	INIT_LIST_HEAD(&nm_i->free_nid_list);
+	INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
+	INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
 	INIT_LIST_HEAD(&nm_i->nat_entries);
 
 	mutex_init(&nm_i->build_lock);
-	spin_lock_init(&nm_i->free_nid_list_lock);
+	spin_lock_init(&nm_i->nid_list_lock);
 	init_rwsem(&nm_i->nat_tree_lock);
 
 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
@@ -2335,17 +2363,19 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 		return;
 
 	/* destroy free nid list */
-	spin_lock(&nm_i->free_nid_list_lock);
-	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
-		f2fs_bug_on(sbi, i->state == NID_ALLOC);
+	spin_lock(&nm_i->nid_list_lock);
+	list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
+								list) {
+		__remove_nid_from_list(sbi, i, FREE_NID_LIST);
 		__del_from_free_nid_list(nm_i, i);
-		nm_i->fcnt--;
-		spin_unlock(&nm_i->free_nid_list_lock);
+		spin_unlock(&nm_i->nid_list_lock);
 		kmem_cache_free(free_nid_slab, i);
-		spin_lock(&nm_i->free_nid_list_lock);
+		spin_lock(&nm_i->nid_list_lock);
 	}
-	f2fs_bug_on(sbi, nm_i->fcnt);
-	spin_unlock(&nm_i->free_nid_list_lock);
+	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
+	f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
+	f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
+	spin_unlock(&nm_i->nid_list_lock);
 
 	/* destroy nat cache */
 	down_write(&nm_i->nat_tree_lock);