@@ -21,9 +21,27 @@
 #include "segment.h"
 #include <trace/events/f2fs.h>
 
+#define on_build_free_nids(nmi) mutex_is_locked(&nm_i->build_lock)
+
 static struct kmem_cache *nat_entry_slab;
 static struct kmem_cache *free_nid_slab;
 
+static inline bool available_free_memory(struct f2fs_nm_info *nm_i, int type)
+{
+	struct sysinfo val;
+	unsigned long mem_size = 0;
+
+	si_meminfo(&val);
+	if (type == FREE_NIDS)
+		mem_size = nm_i->fcnt * sizeof(struct free_nid);
+	else if (type == NAT_ENTRIES)
+		mem_size += nm_i->nat_cnt * sizeof(struct nat_entry);
+	mem_size >>= 12;
+
+	/* give 50:50 memory for free nids and nat caches respectively */
+	return (mem_size < ((val.totalram * nm_i->ram_thresh) >> 11));
+}
+
 static void clear_node_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
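available_free_memory() converts each cache's footprint to 4 KB pages and compares it against a slice of total RAM; with the shift by 11, each of the two caches gets half of ram_thresh/1024 of all pages. A userspace sketch of that arithmetic (page size, RAM size and the ram_thresh value of 10 are assumptions for illustration, not taken from the patch):

```c
/* Userspace model of the threshold check in available_free_memory().
 * Illustrative only: totalram, ram_thresh and the cache size are made up. */
#include <stdio.h>

int main(void)
{
	unsigned long totalram = 2UL << 20;     /* total RAM in 4 KB pages (8 GiB) */
	unsigned long ram_thresh = 10;          /* assumed default nm_i->ram_thresh */
	unsigned long cache_bytes = 48UL << 20; /* bytes used by one cache (48 MiB) */

	unsigned long cache_pages = cache_bytes >> 12;       /* bytes -> 4 KB pages */
	unsigned long limit = (totalram * ram_thresh) >> 11; /* half of thresh/1024 of RAM */

	printf("cache uses %lu pages, limit is %lu pages -> %s\n",
	       cache_pages, limit,
	       cache_pages < limit ? "room left" : "over budget");
	return 0;
}
```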
@@ -82,42 +100,6 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 	return dst_page;
 }
 
-/*
- * Readahead NAT pages
- */
-static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
-{
-	struct address_space *mapping = META_MAPPING(sbi);
-	struct f2fs_nm_info *nm_i = NM_I(sbi);
-	struct page *page;
-	pgoff_t index;
-	int i;
-	struct f2fs_io_info fio = {
-		.type = META,
-		.rw = READ_SYNC | REQ_META | REQ_PRIO
-	};
-
-
-	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
-		if (unlikely(nid >= nm_i->max_nid))
-			nid = 0;
-		index = current_nat_addr(sbi, nid);
-
-		page = grab_cache_page(mapping, index);
-		if (!page)
-			continue;
-		if (PageUptodate(page)) {
-			mark_page_accessed(page);
-			f2fs_put_page(page, 1);
-			continue;
-		}
-		f2fs_submit_page_mbio(sbi, page, index, &fio);
-		mark_page_accessed(page);
-		f2fs_put_page(page, 0);
-	}
-	f2fs_submit_merged_bio(sbi, META, READ);
-}
-
 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
 {
 	return radix_tree_lookup(&nm_i->nat_root, n);
@@ -151,6 +133,20 @@ int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
 	return is_cp;
 }
 
+bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	struct nat_entry *e;
+	bool fsync_done = false;
+
+	read_lock(&nm_i->nat_tree_lock);
+	e = __lookup_nat_cache(nm_i, nid);
+	if (e)
+		fsync_done = e->fsync_done;
+	read_unlock(&nm_i->nat_tree_lock);
+	return fsync_done;
+}
+
 static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
 {
 	struct nat_entry *new;
@@ -164,6 +160,7 @@ static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
 	}
 	memset(new, 0, sizeof(struct nat_entry));
 	nat_set_nid(new, nid);
+	new->checkpointed = true;
 	list_add_tail(&new->list, &nm_i->nat_entries);
 	nm_i->nat_cnt++;
 	return new;
@@ -185,13 +182,12 @@ retry:
 		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
 		nat_set_ino(e, le32_to_cpu(ne->ino));
 		nat_set_version(e, ne->version);
-		e->checkpointed = true;
 	}
 	write_unlock(&nm_i->nat_tree_lock);
 }
 
 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
-			block_t new_blkaddr)
+			block_t new_blkaddr, bool fsync_done)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *e;
@@ -205,7 +201,6 @@ retry:
 			goto retry;
 		}
 		e->ni = *ni;
-		e->checkpointed = true;
 		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
 	} else if (new_blkaddr == NEW_ADDR) {
 		/*
@@ -217,9 +212,6 @@ retry:
 		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
 	}
 
-	if (new_blkaddr == NEW_ADDR)
-		e->checkpointed = false;
-
 	/* sanity check */
 	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
 	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
@@ -239,6 +231,11 @@ retry:
 	/* change address */
 	nat_set_blkaddr(e, new_blkaddr);
 	__set_nat_cache_dirty(nm_i, e);
+
+	/* update fsync_mark if its inode nat entry is still alive */
+	e = __lookup_nat_cache(nm_i, ni->ino);
+	if (e)
+		e->fsync_done = fsync_done;
 	write_unlock(&nm_i->nat_tree_lock);
 }
 
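The fsync_done plumbing above lets a later caller ask, through fsync_mark_done(), whether the most recent write of any of an inode's node blocks came from fsync; set_node_addr() records that mark on the inode's own NAT entry. A toy userspace model of the bookkeeping (the structure, cache and nids are invented for illustration, this is not f2fs code):

```c
/* Toy model of the fsync_done mark: writing any node block of an inode
 * updates the flag kept on the inode's own entry. Illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define NENTRIES 8

struct toy_nat_entry {
	unsigned nid;
	bool fsync_done;
};

static struct toy_nat_entry cache[NENTRIES];

static struct toy_nat_entry *lookup(unsigned nid)
{
	for (int i = 0; i < NENTRIES; i++)
		if (cache[i].nid == nid)
			return &cache[i];
	return NULL;
}

/* analogous to the tail of set_node_addr(): mark the owning inode's entry */
static void toy_set_node_addr(unsigned nid, unsigned ino, bool fsync_done)
{
	struct toy_nat_entry *e = lookup(ino);
	if (e)
		e->fsync_done = fsync_done;
}

int main(void)
{
	cache[0].nid = 3;                /* inode 3's own NAT entry */
	toy_set_node_addr(7, 3, true);   /* node block 7 of inode 3 written by fsync */
	printf("inode 3 fsync_done = %d\n", cache[0].fsync_done);
	return 0;
}
```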
@@ -246,7 +243,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 
-	if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
+	if (available_free_memory(nm_i, NAT_ENTRIES))
 		return 0;
 
 	write_lock(&nm_i->nat_tree_lock);
@@ -505,7 +502,7 @@ static void truncate_node(struct dnode_of_data *dn)
 	/* Deallocate node address */
 	invalidate_blocks(sbi, ni.blk_addr);
 	dec_valid_node_count(sbi, dn->inode);
-	set_node_addr(sbi, &ni, NULL_ADDR);
+	set_node_addr(sbi, &ni, NULL_ADDR, false);
 
 	if (dn->nid == dn->inode->i_ino) {
 		remove_orphan_inode(sbi, dn->nid);
@@ -763,7 +760,7 @@ skip_partial:
 				f2fs_put_page(page, 1);
 				goto restart;
 			}
-			wait_on_page_writeback(page);
+			f2fs_wait_on_page_writeback(page, NODE);
 			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
 			set_page_dirty(page);
 			unlock_page(page);
@@ -852,7 +849,8 @@ struct page *new_node_page(struct dnode_of_data *dn,
 	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
 		return ERR_PTR(-EPERM);
 
-	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
+	page = grab_cache_page_write_begin(NODE_MAPPING(sbi),
+					dn->nid, AOP_FLAG_NOFS);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
@@ -867,14 +865,14 @@ struct page *new_node_page(struct dnode_of_data *dn,
 	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
 	new_ni = old_ni;
 	new_ni.ino = dn->inode->i_ino;
-	set_node_addr(sbi, &new_ni, NEW_ADDR);
+	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 
 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
 	set_cold_node(dn->inode, page);
 	SetPageUptodate(page);
 	set_page_dirty(page);
 
-	if (ofs == XATTR_NODE_OFFSET)
+	if (f2fs_has_xattr_block(ofs))
 		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
 
 	dn->node_page = page;
@@ -948,7 +946,8 @@ struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
 	struct page *page;
 	int err;
 repeat:
-	page = grab_cache_page(NODE_MAPPING(sbi), nid);
+	page = grab_cache_page_write_begin(NODE_MAPPING(sbi),
+					nid, AOP_FLAG_NOFS);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
@@ -959,7 +958,7 @@ repeat:
 		goto got_it;
 
 	lock_page(page);
-	if (unlikely(!PageUptodate(page))) {
+	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
 		f2fs_put_page(page, 1);
 		return ERR_PTR(-EIO);
 	}
@@ -968,7 +967,6 @@ repeat:
 		goto repeat;
 	}
 got_it:
-	f2fs_bug_on(nid != nid_of_node(page));
 	mark_page_accessed(page);
 	return page;
 }
@@ -1168,7 +1166,7 @@ int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
 				continue;
 
 			if (ino && ino_of_node(page) == ino) {
-				wait_on_page_writeback(page);
+				f2fs_wait_on_page_writeback(page, NODE);
 				if (TestClearPageError(page))
 					ret = -EIO;
 			}
@@ -1201,7 +1199,7 @@ static int f2fs_write_node_page(struct page *page,
 	if (unlikely(sbi->por_doing))
 		goto redirty_out;
 
-	wait_on_page_writeback(page);
+	f2fs_wait_on_page_writeback(page, NODE);
 
 	/* get old block addr of this node page */
 	nid = nid_of_node(page);
@@ -1222,7 +1220,7 @@ static int f2fs_write_node_page(struct page *page,
 	mutex_lock(&sbi->node_write);
 	set_page_writeback(page);
 	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
-	set_node_addr(sbi, &ni, new_addr);
+	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
 	dec_page_count(sbi, F2FS_DIRTY_NODES);
 	mutex_unlock(&sbi->node_write);
 	unlock_page(page);
@@ -1231,35 +1229,32 @@ static int f2fs_write_node_page(struct page *page,
 redirty_out:
 	dec_page_count(sbi, F2FS_DIRTY_NODES);
 	wbc->pages_skipped++;
+	account_page_redirty(page);
 	set_page_dirty(page);
 	return AOP_WRITEPAGE_ACTIVATE;
 }
 
-/*
- * It is very important to gather dirty pages and write at once, so that we can
- * submit a big bio without interfering other data writes.
- * Be default, 512 pages (2MB) * 3 node types, is more reasonable.
- */
-#define COLLECT_DIRTY_NODES	1536
 static int f2fs_write_node_pages(struct address_space *mapping,
 			struct writeback_control *wbc)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
-	long nr_to_write = wbc->nr_to_write;
+	long diff;
 
 	/* balancing f2fs's metadata in background */
 	f2fs_balance_fs_bg(sbi);
 
 	/* collect a number of dirty node pages and write together */
-	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
-		return 0;
+	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
+		goto skip_write;
 
-	/* if mounting is failed, skip writing node pages */
-	wbc->nr_to_write = 3 * max_hw_blocks(sbi);
+	diff = nr_pages_to_write(sbi, NODE, wbc);
 	wbc->sync_mode = WB_SYNC_NONE;
 	sync_node_pages(sbi, 0, wbc);
-	wbc->nr_to_write = nr_to_write - (3 * max_hw_blocks(sbi) -
-						wbc->nr_to_write);
+	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
+	return 0;
+
+skip_write:
+	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
 	return 0;
 }
 
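The rewritten f2fs_write_node_pages() takes a write quota for the node batch and afterwards charges the caller's nr_to_write only for its own share, clamped at zero. A standalone sketch of that accounting; the exact contract of nr_pages_to_write() is assumed here, and all the numbers are made up:

```c
/* Sketch of the nr_to_write bookkeeping around a batched node writeback. */
#include <stdio.h>

static long max_long(long a, long b) { return a > b ? a : b; }

int main(void)
{
	long requested = 64;         /* caller's wbc->nr_to_write */
	long desired_batch = 1536;   /* batch size preferred for node pages */

	/* grow the budget for this pass and remember by how much (the 'diff') */
	long diff = desired_batch - requested;
	long nr_to_write = requested + diff;       /* == desired_batch */

	long written = 40;                         /* pages this pass actually wrote */
	nr_to_write -= written;                    /* writeback decrements as it goes */

	/* hand back only the caller's share, never a negative count */
	nr_to_write = max_long(0, nr_to_write - diff);
	printf("caller has %ld pages of its budget left\n", nr_to_write);
	return 0;
}
```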
@@ -1307,22 +1302,17 @@ const struct address_space_operations f2fs_node_aops = {
 	.releasepage	= f2fs_release_node_page,
 };
 
-static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
+static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
+						nid_t n)
 {
-	struct list_head *this;
-	struct free_nid *i;
-	list_for_each(this, head) {
-		i = list_entry(this, struct free_nid, list);
-		if (i->nid == n)
-			return i;
-	}
-	return NULL;
+	return radix_tree_lookup(&nm_i->free_nid_root, n);
 }
 
-static void __del_from_free_nid_list(struct free_nid *i)
+static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
+						struct free_nid *i)
 {
 	list_del(&i->list);
-	kmem_cache_free(free_nid_slab, i);
+	radix_tree_delete(&nm_i->free_nid_root, i->nid);
 }
 
 static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
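With the free nids indexed by a radix tree, radix_tree_insert() returns non-zero when the slot is already occupied, so add_free_nid() below no longer needs a separate lookup before insertion. The same insert-then-check-return shape in a standalone toy table (the fixed-size table is only a stand-in for the kernel radix tree):

```c
/* Duplicate detection by insertion result, modelled with a toy table. */
#include <stdio.h>
#include <string.h>

#define SLOTS 16

static unsigned table[SLOTS];   /* 0 = empty slot; otherwise holds the key */

/* returns 0 on success, -1 if the slot is taken (akin to -EEXIST) */
static int toy_insert(unsigned key)
{
	unsigned idx = key % SLOTS;

	if (table[idx] != 0)
		return -1;      /* already present (or colliding, toy limitation) */
	table[idx] = key;
	return 0;
}

int main(void)
{
	memset(table, 0, sizeof(table));
	printf("first insert of nid 42: %d\n", toy_insert(42));
	printf("second insert of nid 42: %d\n", toy_insert(42));
	return 0;
}
```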
@@ -1331,7 +1321,7 @@ static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
 	struct nat_entry *ne;
 	bool allocated = false;
 
-	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
+	if (!available_free_memory(nm_i, FREE_NIDS))
 		return -1;
 
 	/* 0 nid should not be used */
@@ -1342,7 +1332,8 @@ static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
 		/* do not add allocated nids */
 		read_lock(&nm_i->nat_tree_lock);
 		ne = __lookup_nat_cache(nm_i, nid);
-		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
+		if (ne &&
+			(!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR))
 			allocated = true;
 		read_unlock(&nm_i->nat_tree_lock);
 		if (allocated)
@@ -1354,7 +1345,7 @@ static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
 	i->state = NID_NEW;
 
 	spin_lock(&nm_i->free_nid_list_lock);
-	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
+	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
 		spin_unlock(&nm_i->free_nid_list_lock);
 		kmem_cache_free(free_nid_slab, i);
 		return 0;
@@ -1368,13 +1359,19 @@ static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
 static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
 {
 	struct free_nid *i;
+	bool need_free = false;
+
 	spin_lock(&nm_i->free_nid_list_lock);
-	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
+	i = __lookup_free_nid_list(nm_i, nid);
 	if (i && i->state == NID_NEW) {
-		__del_from_free_nid_list(i);
+		__del_from_free_nid_list(nm_i, i);
 		nm_i->fcnt--;
+		need_free = true;
 	}
 	spin_unlock(&nm_i->free_nid_list_lock);
+
+	if (need_free)
+		kmem_cache_free(free_nid_slab, i);
 }
 
 static void scan_nat_page(struct f2fs_nm_info *nm_i,
@@ -1413,7 +1410,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
 		return;
 
 	/* readahead nat pages to be scanned */
-	ra_nat_pages(sbi, nid);
+	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);
 
 	while (1) {
 		struct page *page = get_current_nat_page(sbi, nid);
@@ -1454,7 +1451,6 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i = NULL;
-	struct list_head *this;
retry:
 	if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid))
 		return false;
@@ -1462,13 +1458,11 @@ retry:
 	spin_lock(&nm_i->free_nid_list_lock);
 
 	/* We should not use stale free nids created by build_free_nids */
-	if (nm_i->fcnt && !sbi->on_build_free_nids) {
+	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
 		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
-		list_for_each(this, &nm_i->free_nid_list) {
-			i = list_entry(this, struct free_nid, list);
+		list_for_each_entry(i, &nm_i->free_nid_list, list)
 			if (i->state == NID_NEW)
 				break;
-		}
 
 		f2fs_bug_on(i->state != NID_NEW);
 		*nid = i->nid;
@@ -1481,9 +1475,7 @@ retry:
 
 	/* Let's scan nat pages and its caches to get free nids */
 	mutex_lock(&nm_i->build_lock);
-	sbi->on_build_free_nids = true;
 	build_free_nids(sbi);
-	sbi->on_build_free_nids = false;
 	mutex_unlock(&nm_i->build_lock);
 	goto retry;
 }
@@ -1497,10 +1489,12 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
 	struct free_nid *i;
 
 	spin_lock(&nm_i->free_nid_list_lock);
-	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
+	i = __lookup_free_nid_list(nm_i, nid);
 	f2fs_bug_on(!i || i->state != NID_ALLOC);
-	__del_from_free_nid_list(i);
+	__del_from_free_nid_list(nm_i, i);
 	spin_unlock(&nm_i->free_nid_list_lock);
+
+	kmem_cache_free(free_nid_slab, i);
 }
 
 /*
@@ -1510,20 +1504,25 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i;
+	bool need_free = false;
 
 	if (!nid)
 		return;
 
 	spin_lock(&nm_i->free_nid_list_lock);
-	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
+	i = __lookup_free_nid_list(nm_i, nid);
 	f2fs_bug_on(!i || i->state != NID_ALLOC);
-	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
-		__del_from_free_nid_list(i);
+	if (!available_free_memory(nm_i, FREE_NIDS)) {
+		__del_from_free_nid_list(nm_i, i);
+		need_free = true;
 	} else {
 		i->state = NID_NEW;
 		nm_i->fcnt++;
 	}
 	spin_unlock(&nm_i->free_nid_list_lock);
+
+	if (need_free)
+		kmem_cache_free(free_nid_slab, i);
 }
 
 void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
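Because __del_from_free_nid_list() now only unlinks the entry, its callers record need_free while holding the spinlock and call kmem_cache_free() only after dropping it. A standalone sketch of that unlink-under-lock, free-outside-lock pattern using pthreads (the type and field names are invented for the example):

```c
/* Unlink under the lock, free outside it: keeps the critical section short. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_nid {
	unsigned nid;
	bool in_table;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void remove_toy_nid(struct toy_nid *i)
{
	bool need_free = false;

	pthread_mutex_lock(&lock);
	if (i->in_table) {
		i->in_table = false;   /* unlink while protected */
		need_free = true;
	}
	pthread_mutex_unlock(&lock);

	if (need_free)                 /* release memory without holding the lock */
		free(i);
}

int main(void)
{
	struct toy_nid *i = malloc(sizeof(*i));

	i->nid = 7;
	i->in_table = true;
	remove_toy_nid(i);
	puts("entry removed and freed outside the lock");
	return 0;
}
```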
@@ -1531,10 +1530,83 @@ void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
 			block_t new_blkaddr)
 {
 	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
-	set_node_addr(sbi, ni, new_blkaddr);
+	set_node_addr(sbi, ni, new_blkaddr, false);
 	clear_node_page_dirty(page);
 }
 
+void recover_inline_xattr(struct inode *inode, struct page *page)
+{
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	void *src_addr, *dst_addr;
+	size_t inline_size;
+	struct page *ipage;
+	struct f2fs_inode *ri;
+
+	if (!f2fs_has_inline_xattr(inode))
+		return;
+
+	if (!IS_INODE(page))
+		return;
+
+	ri = F2FS_INODE(page);
+	if (!(ri->i_inline & F2FS_INLINE_XATTR))
+		return;
+
+	ipage = get_node_page(sbi, inode->i_ino);
+	f2fs_bug_on(IS_ERR(ipage));
+
+	dst_addr = inline_xattr_addr(ipage);
+	src_addr = inline_xattr_addr(page);
+	inline_size = inline_xattr_size(inode);
+
+	memcpy(dst_addr, src_addr, inline_size);
+
+	update_inode(inode, ipage);
+	f2fs_put_page(ipage, 1);
+}
+
+bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
+{
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
+	nid_t new_xnid = nid_of_node(page);
+	struct node_info ni;
+
+	recover_inline_xattr(inode, page);
+
+	if (!f2fs_has_xattr_block(ofs_of_node(page)))
+		return false;
+
+	/* 1: invalidate the previous xattr nid */
+	if (!prev_xnid)
+		goto recover_xnid;
+
+	/* Deallocate node address */
+	get_node_info(sbi, prev_xnid, &ni);
+	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
+	invalidate_blocks(sbi, ni.blk_addr);
+	dec_valid_node_count(sbi, inode);
+	set_node_addr(sbi, &ni, NULL_ADDR, false);
+
+recover_xnid:
+	/* 2: allocate new xattr nid */
+	if (unlikely(!inc_valid_node_count(sbi, inode)))
+		f2fs_bug_on(1);
+
+	remove_free_nid(NM_I(sbi), new_xnid);
+	get_node_info(sbi, new_xnid, &ni);
+	ni.ino = inode->i_ino;
+	set_node_addr(sbi, &ni, NEW_ADDR, false);
+	F2FS_I(inode)->i_xattr_nid = new_xnid;
+
+	/* 3: update xattr blkaddr */
+	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
+	set_node_addr(sbi, &ni, blkaddr, false);
+
+	update_inode_page(inode);
+	return true;
+}
+
 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 {
 	struct f2fs_inode *src, *dst;
@@ -1567,7 +1639,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 
 	if (unlikely(!inc_valid_node_count(sbi, NULL)))
 		WARN_ON(1);
-	set_node_addr(sbi, &new_ni, NEW_ADDR);
+	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 	inc_valid_inode_count(sbi);
 	f2fs_put_page(ipage, 1);
 	return 0;
@@ -1590,15 +1662,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
 	for (; page_idx < start + nrpages; page_idx++) {
 		/* alloc temporal page for read node summary info*/
 		page = alloc_page(GFP_F2FS_ZERO);
-		if (!page) {
-			struct page *tmp;
-			list_for_each_entry_safe(page, tmp, pages, lru) {
-				list_del(&page->lru);
-				unlock_page(page);
-				__free_pages(page, 0);
-			}
-			return -ENOMEM;
-		}
+		if (!page)
+			break;
 
 		lock_page(page);
 		page->index = page_idx;
@@ -1609,7 +1674,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
 		f2fs_submit_page_mbio(sbi, page, page->index, &fio);
 
 	f2fs_submit_merged_bio(sbi, META, READ);
-	return 0;
+
+	return page_idx - start;
 }
 
 int restore_node_summary(struct f2fs_sb_info *sbi,
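ra_sum_pages() now reports how many pages it actually managed to set up instead of failing the whole batch on the first allocation error, and restore_node_summary() treats zero as -ENOMEM while consuming shorter batches. A compact sketch of that return-what-you-got calling convention (the function name and all numbers below are placeholders):

```c
/* 'Return how much you got' instead of all-or-nothing allocation. */
#include <stdio.h>

/* pretend only 'avail' pages can be set up per call */
static int toy_ra_pages(int requested, int avail)
{
	return requested < avail ? requested : avail;  /* 0 means nothing at all */
}

int main(void)
{
	int last = 10, done = 0;

	while (done < last) {
		int batch = toy_ra_pages(last - done, 3);

		if (!batch) {
			puts("no memory: give up with -ENOMEM");
			return 1;
		}
		printf("processing %d read-ahead pages\n", batch);
		done += batch;
	}
	puts("all summaries restored");
	return 0;
}
```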
@@ -1628,15 +1694,17 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
 	addr = START_BLOCK(sbi, segno);
 	sum_entry = &sum->entries[0];
 
-	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
+	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
 		nrpages = min(last_offset - i, bio_blocks);
 
 		/* read ahead node pages */
-		err = ra_sum_pages(sbi, &page_list, addr, nrpages);
-		if (err)
-			return err;
+		nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages);
+		if (!nrpages)
+			return -ENOMEM;
 
 		list_for_each_entry_safe(page, tmp, &page_list, lru) {
+			if (err)
+				goto skip;
 
 			lock_page(page);
 			if (unlikely(!PageUptodate(page))) {
@@ -1648,9 +1716,9 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
 				sum_entry->ofs_in_node = 0;
 				sum_entry++;
 			}
-
-			list_del(&page->lru);
 			unlock_page(page);
+skip:
+			list_del(&page->lru);
 			__free_pages(page, 0);
 		}
 	}
@@ -1709,7 +1777,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
 	struct f2fs_summary_block *sum = curseg->sum_blk;
-	struct list_head *cur, *n;
+	struct nat_entry *ne, *cur;
 	struct page *page = NULL;
 	struct f2fs_nat_block *nat_blk = NULL;
 	nid_t start_nid = 0, end_nid = 0;
@@ -1721,18 +1789,17 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
 	mutex_lock(&curseg->curseg_mutex);
 
 	/* 1) flush dirty nat caches */
-	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
-		struct nat_entry *ne;
+	list_for_each_entry_safe(ne, cur, &nm_i->dirty_nat_entries, list) {
 		nid_t nid;
 		struct f2fs_nat_entry raw_ne;
 		int offset = -1;
 		block_t new_blkaddr;
 
-		ne = list_entry(cur, struct nat_entry, list);
-		nid = nat_get_nid(ne);
-
 		if (nat_get_blkaddr(ne) == NEW_ADDR)
 			continue;
+
+		nid = nat_get_nid(ne);
+
 		if (flushed)
 			goto to_nat_page;
 
@@ -1783,16 +1850,12 @@ flush_now:
 		} else {
 			write_lock(&nm_i->nat_tree_lock);
 			__clear_nat_cache_dirty(nm_i, ne);
-			ne->checkpointed = true;
 			write_unlock(&nm_i->nat_tree_lock);
 		}
 	}
 	if (!flushed)
 		mutex_unlock(&curseg->curseg_mutex);
 	f2fs_put_page(page, 1);
-
-	/* 2) shrink nat caches if necessary */
-	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
 }
 
 static int init_node_manager(struct f2fs_sb_info *sbi)
@@ -1807,10 +1870,14 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 	/* segment_count_nat includes pair segment so divide to 2. */
 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
 	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
-	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
+
+	/* not used nids: 0, node, meta, (and root counted as valid node) */
+	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks - 3;
 	nm_i->fcnt = 0;
 	nm_i->nat_cnt = 0;
+	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
 
+	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
 	INIT_LIST_HEAD(&nm_i->free_nid_list);
 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
 	INIT_LIST_HEAD(&nm_i->nat_entries);
@@ -1864,8 +1931,11 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 	spin_lock(&nm_i->free_nid_list_lock);
 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
 		f2fs_bug_on(i->state == NID_ALLOC);
-		__del_from_free_nid_list(i);
+		__del_from_free_nid_list(nm_i, i);
 		nm_i->fcnt--;
+		spin_unlock(&nm_i->free_nid_list_lock);
+		kmem_cache_free(free_nid_slab, i);
+		spin_lock(&nm_i->free_nid_list_lock);
 	}
 	f2fs_bug_on(nm_i->fcnt);
 	spin_unlock(&nm_i->free_nid_list_lock);
@@ -1875,11 +1945,9 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 	while ((found = __gang_lookup_nat_cache(nm_i,
 			nid, NATVEC_SIZE, natvec))) {
 		unsigned idx;
-		for (idx = 0; idx < found; idx++) {
-			struct nat_entry *e = natvec[idx];
-			nid = nat_get_nid(e) + 1;
-			__del_from_nat_cache(nm_i, e);
-		}
+		nid = nat_get_nid(natvec[found - 1]) + 1;
+		for (idx = 0; idx < found; idx++)
+			__del_from_nat_cache(nm_i, natvec[idx]);
 	}
 	f2fs_bug_on(nm_i->nat_cnt);
 	write_unlock(&nm_i->nat_tree_lock);
@@ -1892,12 +1960,12 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 int __init create_node_manager_caches(void)
 {
 	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
-			sizeof(struct nat_entry), NULL);
+			sizeof(struct nat_entry));
 	if (!nat_entry_slab)
 		return -ENOMEM;
 
 	free_nid_slab = f2fs_kmem_cache_create("free_nid",
-			sizeof(struct free_nid), NULL);
+			sizeof(struct free_nid));
 	if (!free_nid_slab) {
 		kmem_cache_destroy(nat_entry_slab);
 		return -ENOMEM;