@@ -169,7 +169,7 @@ found:
 	return result - size + __reverse_ffz(tmp);
 }
 
-bool need_SSR(struct f2fs_sb_info *sbi)
+bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
 {
 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
@@ -177,14 +177,14 @@ bool need_SSR(struct f2fs_sb_info *sbi)
 
 	if (test_opt(sbi, LFS))
 		return false;
-	if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
+	if (sbi->gc_mode == GC_URGENT)
 		return true;
 
 	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
 			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
 }
 
-void register_inmem_page(struct inode *inode, struct page *page)
+void f2fs_register_inmem_page(struct inode *inode, struct page *page)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -230,6 +230,8 @@ static int __revoke_inmem_pages(struct inode *inode,
 
 		lock_page(page);
 
+		f2fs_wait_on_page_writeback(page, DATA, true);
+
 		if (recover) {
 			struct dnode_of_data dn;
 			struct node_info ni;
@@ -237,7 +239,8 @@ static int __revoke_inmem_pages(struct inode *inode,
 			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
 retry:
 			set_new_dnode(&dn, inode, NULL, NULL, 0);
-			err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+			err = f2fs_get_dnode_of_data(&dn, page->index,
+								LOOKUP_NODE);
 			if (err) {
 				if (err == -ENOMEM) {
 					congestion_wait(BLK_RW_ASYNC, HZ/50);
@@ -247,9 +250,9 @@ retry:
 					err = -EAGAIN;
 					goto next;
 				}
-			get_node_info(sbi, dn.nid, &ni);
+			f2fs_get_node_info(sbi, dn.nid, &ni);
 			if (cur->old_addr == NEW_ADDR) {
-				invalidate_blocks(sbi, dn.data_blkaddr);
+				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
 			} else
 				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
@@ -271,7 +274,7 @@ next:
 	return err;
 }
 
-void drop_inmem_pages_all(struct f2fs_sb_info *sbi)
+void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
 {
 	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
 	struct inode *inode;
@@ -287,15 +290,23 @@ next:
 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
 
 	if (inode) {
-		drop_inmem_pages(inode);
+		if (gc_failure) {
+			if (fi->i_gc_failures[GC_FAILURE_ATOMIC])
+				goto drop;
+			goto skip;
+		}
+drop:
+		set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+		f2fs_drop_inmem_pages(inode);
 		iput(inode);
 	}
+skip:
 	congestion_wait(BLK_RW_ASYNC, HZ/50);
 	cond_resched();
 	goto next;
 }
 
-void drop_inmem_pages(struct inode *inode)
+void f2fs_drop_inmem_pages(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -309,11 +320,11 @@ void drop_inmem_pages(struct inode *inode)
 	mutex_unlock(&fi->inmem_lock);
 
 	clear_inode_flag(inode, FI_ATOMIC_FILE);
-	clear_inode_flag(inode, FI_HOT_DATA);
+	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
 	stat_dec_atomic_write(inode);
 }
 
-void drop_inmem_page(struct inode *inode, struct page *page)
+void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -328,7 +339,7 @@ void drop_inmem_page(struct inode *inode, struct page *page)
 			break;
 	}
 
-	f2fs_bug_on(sbi, !cur || cur->page != page);
+	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
 	list_del(&cur->list);
 	mutex_unlock(&fi->inmem_lock);
 
@@ -343,8 +354,7 @@ void drop_inmem_page(struct inode *inode, struct page *page)
 	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
 }
 
-static int __commit_inmem_pages(struct inode *inode,
-					struct list_head *revoke_list)
+static int __f2fs_commit_inmem_pages(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -357,9 +367,12 @@ static int __commit_inmem_pages(struct inode *inode,
 		.op_flags = REQ_SYNC | REQ_PRIO,
 		.io_type = FS_DATA_IO,
 	};
+	struct list_head revoke_list;
 	pgoff_t last_idx = ULONG_MAX;
 	int err = 0;
 
+	INIT_LIST_HEAD(&revoke_list);
+
 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
 		struct page *page = cur->page;
 
@@ -371,14 +384,14 @@ static int __commit_inmem_pages(struct inode *inode,
 		f2fs_wait_on_page_writeback(page, DATA, true);
 		if (clear_page_dirty_for_io(page)) {
 			inode_dec_dirty_pages(inode);
-			remove_dirty_inode(inode);
+			f2fs_remove_dirty_inode(inode);
 		}
retry:
 		fio.page = page;
 		fio.old_blkaddr = NULL_ADDR;
 		fio.encrypted_page = NULL;
 		fio.need_lock = LOCK_DONE;
-		err = do_write_data_page(&fio);
+		err = f2fs_do_write_data_page(&fio);
 		if (err) {
 			if (err == -ENOMEM) {
 				congestion_wait(BLK_RW_ASYNC, HZ/50);
@@ -393,50 +406,46 @@ retry:
 			last_idx = page->index;
 		}
 		unlock_page(page);
-		list_move_tail(&cur->list, revoke_list);
+		list_move_tail(&cur->list, &revoke_list);
 	}
 
 	if (last_idx != ULONG_MAX)
 		f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);
 
-	if (!err)
-		__revoke_inmem_pages(inode, revoke_list, false, false);
+	if (err) {
+		/*
+		 * try to revoke all committed pages, but still we could fail
+		 * due to no memory or other reason, if that happened, EAGAIN
+		 * will be returned, which means in such case, transaction is
+		 * already not integrity, caller should use journal to do the
+		 * recovery or rewrite & commit last transaction. For other
+		 * error number, revoking was done by filesystem itself.
+		 */
+		err = __revoke_inmem_pages(inode, &revoke_list, false, true);
+
+		/* drop all uncommitted pages */
+		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+	} else {
+		__revoke_inmem_pages(inode, &revoke_list, false, false);
+	}
 
 	return err;
 }
 
-int commit_inmem_pages(struct inode *inode)
+int f2fs_commit_inmem_pages(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
-	struct list_head revoke_list;
 	int err;
 
-	INIT_LIST_HEAD(&revoke_list);
 	f2fs_balance_fs(sbi, true);
 	f2fs_lock_op(sbi);
 
 	set_inode_flag(inode, FI_ATOMIC_COMMIT);
 
 	mutex_lock(&fi->inmem_lock);
-	err = __commit_inmem_pages(inode, &revoke_list);
-	if (err) {
-		int ret;
-		/*
-		 * try to revoke all committed pages, but still we could fail
-		 * due to no memory or other reason, if that happened, EAGAIN
-		 * will be returned, which means in such case, transaction is
-		 * already not integrity, caller should use journal to do the
-		 * recovery or rewrite & commit last transaction. For other
-		 * error number, revoking was done by filesystem itself.
-		 */
-		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
-		if (ret)
-			err = ret;
+	err = __f2fs_commit_inmem_pages(inode);
 
-		/* drop all uncommitted pages */
-		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
-	}
 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
 	if (!list_empty(&fi->inmem_ilist))
 		list_del_init(&fi->inmem_ilist);
@@ -478,25 +487,28 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 
 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 {
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+		return;
+
 	/* try to shrink extent cache when there is no enough memory */
-	if (!available_free_memory(sbi, EXTENT_CACHE))
+	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
 		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
 
 	/* check the # of cached NAT entries */
-	if (!available_free_memory(sbi, NAT_ENTRIES))
-		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
+	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
+		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
 
-	if (!available_free_memory(sbi, FREE_NIDS))
-		try_to_free_nids(sbi, MAX_FREE_NIDS);
+	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
+		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
 	else
-		build_free_nids(sbi, false, false);
+		f2fs_build_free_nids(sbi, false, false);
 
 	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
 		return;
 
 	/* checkpoint is the only way to shrink partial cached entries */
-	if (!available_free_memory(sbi, NAT_ENTRIES) ||
-			!available_free_memory(sbi, INO_ENTRIES) ||
+	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
+			!f2fs_available_free_memory(sbi, INO_ENTRIES) ||
 		excess_prefree_segs(sbi) ||
 		excess_dirty_nats(sbi) ||
 		f2fs_time_over(sbi, CP_TIME)) {
@@ -504,7 +516,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 		struct blk_plug plug;
 
 		blk_start_plug(&plug);
-		sync_dirty_inodes(sbi, FILE_INODE);
+		f2fs_sync_dirty_inodes(sbi, FILE_INODE);
 		blk_finish_plug(&plug);
 	}
 	f2fs_sync_fs(sbi->sb, true);
@@ -537,7 +549,7 @@ static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
 
 	for (i = 0; i < sbi->s_ndevs; i++) {
-		if (!is_dirty_device(sbi, ino, i, FLUSH_INO))
+		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
 			continue;
 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
 		if (ret)
@@ -648,7 +660,7 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
 	return cmd.ret;
 }
 
-int create_flush_cmd_control(struct f2fs_sb_info *sbi)
+int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
 {
 	dev_t dev = sbi->sb->s_bdev->bd_dev;
 	struct flush_cmd_control *fcc;
@@ -685,7 +697,7 @@ init_thread:
 	return err;
 }
 
-void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
+void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
 {
 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
 
@@ -915,6 +927,42 @@ static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
 #endif
 }
 
+static void __init_discard_policy(struct f2fs_sb_info *sbi,
+				struct discard_policy *dpolicy,
+				int discard_type, unsigned int granularity)
+{
+	/* common policy */
+	dpolicy->type = discard_type;
+	dpolicy->sync = true;
+	dpolicy->granularity = granularity;
+
+	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
+	dpolicy->io_aware_gran = MAX_PLIST_NUM;
+
+	if (discard_type == DPOLICY_BG) {
+		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
+		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
+		dpolicy->io_aware = true;
+		dpolicy->sync = false;
+		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
+			dpolicy->granularity = 1;
+			dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		}
+	} else if (discard_type == DPOLICY_FORCE) {
+		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
+		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
+		dpolicy->io_aware = false;
+	} else if (discard_type == DPOLICY_FSTRIM) {
+		dpolicy->io_aware = false;
+	} else if (discard_type == DPOLICY_UMOUNT) {
+		dpolicy->max_requests = UINT_MAX;
+		dpolicy->io_aware = false;
+	}
+}
+
+
 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
 static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
 						struct discard_policy *dpolicy,
@@ -929,6 +977,9 @@ static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
 	if (dc->state != D_PREP)
 		return;
 
+	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+		return;
+
 	trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
 
 	dc->error = __blkdev_issue_discard(dc->bdev,
@@ -972,7 +1023,7 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
 		goto do_insert;
 	}
 
-	p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
+	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
do_insert:
 	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
 	if (!dc)
@@ -1037,7 +1088,7 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
 
 	mutex_lock(&dcc->cmd_lock);
 
-	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
+	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
 					NULL, lstart,
 					(struct rb_entry **)&prev_dc,
 					(struct rb_entry **)&next_dc,
@@ -1130,68 +1181,6 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
 	return 0;
 }
 
-static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
-					struct discard_policy *dpolicy,
-					unsigned int start, unsigned int end)
-{
-	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
-	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
-	struct rb_node **insert_p = NULL, *insert_parent = NULL;
-	struct discard_cmd *dc;
-	struct blk_plug plug;
-	int issued;
-
-next:
-	issued = 0;
-
-	mutex_lock(&dcc->cmd_lock);
-	f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
-
-	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
-					NULL, start,
-					(struct rb_entry **)&prev_dc,
-					(struct rb_entry **)&next_dc,
-					&insert_p, &insert_parent, true);
-	if (!dc)
-		dc = next_dc;
-
-	blk_start_plug(&plug);
-
-	while (dc && dc->lstart <= end) {
-		struct rb_node *node;
-
-		if (dc->len < dpolicy->granularity)
-			goto skip;
-
-		if (dc->state != D_PREP) {
-			list_move_tail(&dc->list, &dcc->fstrim_list);
-			goto skip;
-		}
-
-		__submit_discard_cmd(sbi, dpolicy, dc);
-
-		if (++issued >= dpolicy->max_requests) {
-			start = dc->lstart + dc->len;
-
-			blk_finish_plug(&plug);
-			mutex_unlock(&dcc->cmd_lock);
-
-			schedule();
-
-			goto next;
-		}
-skip:
-		node = rb_next(&dc->rb_node);
-		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
-
-		if (fatal_signal_pending(current))
-			break;
-	}
-
-	blk_finish_plug(&plug);
-	mutex_unlock(&dcc->cmd_lock);
-}
-
 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
 					struct discard_policy *dpolicy)
 {
@@ -1210,7 +1199,8 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
 		mutex_lock(&dcc->cmd_lock);
 		if (list_empty(pend_list))
 			goto next;
-		f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
+		f2fs_bug_on(sbi,
+			!f2fs_check_rb_tree_consistence(sbi, &dcc->root));
 		blk_start_plug(&plug);
 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
 			f2fs_bug_on(sbi, dc->state != D_PREP);
@@ -1263,7 +1253,7 @@ static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
 	return dropped;
 }
 
-void drop_discard_cmd(struct f2fs_sb_info *sbi)
+void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
 {
 	__drop_discard_cmd(sbi);
 }
@@ -1332,7 +1322,18 @@ next:
 static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
 						struct discard_policy *dpolicy)
 {
-	__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
+	struct discard_policy dp;
+
+	if (dpolicy) {
+		__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
+		return;
+	}
+
+	/* wait all */
+	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
+	__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
+	__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
 }
 
 /* This should be covered by global mutex, &sit_i->sentry_lock */
@@ -1343,7 +1344,8 @@ static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
 	bool need_wait = false;
 
 	mutex_lock(&dcc->cmd_lock);
-	dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
+	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
+							NULL, blkaddr);
 	if (dc) {
 		if (dc->state == D_PREP) {
 			__punch_discard_cmd(sbi, dc, blkaddr);
@@ -1358,7 +1360,7 @@ static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
 		__wait_one_discard_bio(sbi, dc);
 }
 
-void stop_discard_thread(struct f2fs_sb_info *sbi)
+void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
 {
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 
@@ -1377,11 +1379,13 @@ bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
 	struct discard_policy dpolicy;
 	bool dropped;
 
-	init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity);
+	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
+					dcc->discard_granularity);
 	__issue_discard_cmd(sbi, &dpolicy);
 	dropped = __drop_discard_cmd(sbi);
-	__wait_all_discard_cmd(sbi, &dpolicy);
 
+	/* just to make sure there is no pending discard commands */
+	__wait_all_discard_cmd(sbi, NULL);
 	return dropped;
 }
 
@@ -1397,32 +1401,39 @@ static int issue_discard_thread(void *data)
 	set_freezable();
 
 	do {
-		init_discard_policy(&dpolicy, DPOLICY_BG,
+		__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
 					dcc->discard_granularity);
 
 		wait_event_interruptible_timeout(*q,
 				kthread_should_stop() || freezing(current) ||
 				dcc->discard_wake,
 				msecs_to_jiffies(wait_ms));
+
+		if (dcc->discard_wake)
+			dcc->discard_wake = 0;
+
 		if (try_to_freeze())
 			continue;
 		if (f2fs_readonly(sbi->sb))
 			continue;
 		if (kthread_should_stop())
 			return 0;
+		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+			wait_ms = dpolicy.max_interval;
+			continue;
+		}
 
-		if (dcc->discard_wake)
-			dcc->discard_wake = 0;
-
-		if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
-			init_discard_policy(&dpolicy, DPOLICY_FORCE, 1);
+		if (sbi->gc_mode == GC_URGENT)
+			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
 
 		sb_start_intwrite(sbi->sb);
 
 		issued = __issue_discard_cmd(sbi, &dpolicy);
-		if (issued) {
+		if (issued > 0) {
 			__wait_all_discard_cmd(sbi, &dpolicy);
 			wait_ms = dpolicy.min_interval;
+		} else if (issued == -1){
+			wait_ms = dpolicy.mid_interval;
 		} else {
 			wait_ms = dpolicy.max_interval;
 		}
@@ -1591,20 +1602,24 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
 	return false;
 }
 
-void release_discard_addrs(struct f2fs_sb_info *sbi)
+static void release_discard_addr(struct discard_entry *entry)
+{
+	list_del(&entry->list);
+	kmem_cache_free(discard_entry_slab, entry);
+}
+
+void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
 {
 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
 	struct discard_entry *entry, *this;
 
 	/* drop caches */
-	list_for_each_entry_safe(entry, this, head, list) {
-		list_del(&entry->list);
-		kmem_cache_free(discard_entry_slab, entry);
-	}
+	list_for_each_entry_safe(entry, this, head, list)
+		release_discard_addr(entry);
 }
 
 /*
- * Should call clear_prefree_segments after checkpoint is done.
+ * Should call f2fs_clear_prefree_segments after checkpoint is done.
  */
 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
 {
@@ -1617,7 +1632,8 @@ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
 	mutex_unlock(&dirty_i->seglist_lock);
 }
 
-void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
+						struct cp_control *cpc)
 {
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 	struct list_head *head = &dcc->entry_list;
@@ -1700,40 +1716,13 @@ skip:
 		if (cur_pos < sbi->blocks_per_seg)
 			goto find_next;
 
-		list_del(&entry->list);
+		release_discard_addr(entry);
 		dcc->nr_discards -= total_len;
-		kmem_cache_free(discard_entry_slab, entry);
 	}
 
 	wake_up_discard_thread(sbi, false);
 }
 
-void init_discard_policy(struct discard_policy *dpolicy,
-				int discard_type, unsigned int granularity)
-{
-	/* common policy */
-	dpolicy->type = discard_type;
-	dpolicy->sync = true;
-	dpolicy->granularity = granularity;
-
-	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
-	dpolicy->io_aware_gran = MAX_PLIST_NUM;
-
-	if (discard_type == DPOLICY_BG) {
-		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
-		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
-		dpolicy->io_aware = true;
-	} else if (discard_type == DPOLICY_FORCE) {
-		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
-		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
-		dpolicy->io_aware = false;
-	} else if (discard_type == DPOLICY_FSTRIM) {
-		dpolicy->io_aware = false;
-	} else if (discard_type == DPOLICY_UMOUNT) {
-		dpolicy->io_aware = false;
-	}
-}
-
 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
 {
 	dev_t dev = sbi->sb->s_bdev->bd_dev;
@@ -1786,7 +1775,7 @@ static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
 	if (!dcc)
 		return;
 
-	stop_discard_thread(sbi);
+	f2fs_stop_discard_thread(sbi);
 
 	kfree(dcc);
 	SM_I(sbi)->dcc_info = NULL;
@@ -1833,8 +1822,9 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 				(new_vblocks > sbi->blocks_per_seg)));
 
 	se->valid_blocks = new_vblocks;
-	se->mtime = get_mtime(sbi);
-	SIT_I(sbi)->max_mtime = se->mtime;
+	se->mtime = get_mtime(sbi, false);
+	if (se->mtime > SIT_I(sbi)->max_mtime)
+		SIT_I(sbi)->max_mtime = se->mtime;
 
 	/* Update valid block bitmap */
 	if (del > 0) {
@@ -1902,7 +1892,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 		get_sec_entry(sbi, segno)->valid_blocks += del;
 }
 
-void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
 {
 	unsigned int segno = GET_SEGNO(sbi, addr);
 	struct sit_info *sit_i = SIT_I(sbi);
@@ -1922,14 +1912,14 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
 	up_write(&sit_i->sentry_lock);
 }
 
-bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
+bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
 {
 	struct sit_info *sit_i = SIT_I(sbi);
 	unsigned int segno, offset;
 	struct seg_entry *se;
 	bool is_cp = false;
 
-	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+	if (!is_valid_blkaddr(blkaddr))
 		return true;
 
 	down_read(&sit_i->sentry_lock);
@@ -1961,7 +1951,7 @@ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
 /*
  * Calculate the number of current summary pages for writing
 */
-int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
+int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
 {
 	int valid_sum_count = 0;
 	int i, sum_in_page;
@@ -1991,14 +1981,15 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
 /*
  * Caller should put this summary page
 */
-struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
+struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
 {
-	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
+	return f2fs_get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
 }
 
-void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
+void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
+					void *src, block_t blk_addr)
 {
-	struct page *page = grab_meta_page(sbi, blk_addr);
+	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
 
 	memcpy(page_address(page), src, PAGE_SIZE);
 	set_page_dirty(page);
@@ -2008,18 +1999,19 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
 static void write_sum_page(struct f2fs_sb_info *sbi,
 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
 {
-	update_meta_page(sbi, (void *)sum_blk, blk_addr);
+	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
 }
 
 static void write_current_sum_page(struct f2fs_sb_info *sbi,
 						int type, block_t blk_addr)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
-	struct page *page = grab_meta_page(sbi, blk_addr);
+	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
 	struct f2fs_summary_block *src = curseg->sum_blk;
 	struct f2fs_summary_block *dst;
 
 	dst = (struct f2fs_summary_block *)page_address(page);
+	memset(dst, 0, PAGE_SIZE);
 
 	mutex_lock(&curseg->curseg_mutex);
 
@@ -2259,7 +2251,7 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type)
 	curseg->alloc_type = SSR;
 	__next_free_blkoff(sbi, curseg, 0);
 
-	sum_page = get_sum_page(sbi, new_segno);
+	sum_page = f2fs_get_sum_page(sbi, new_segno);
 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
 	f2fs_put_page(sum_page, 1);
@@ -2273,7 +2265,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
 	int i, cnt;
 	bool reversed = false;
 
-	/* need_SSR() already forces to do this */
+	/* f2fs_need_SSR() already forces to do this */
 	if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
 		curseg->next_segno = segno;
 		return 1;
@@ -2325,7 +2317,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
 		new_curseg(sbi, type, false);
 	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
 		new_curseg(sbi, type, false);
-	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
+	else if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
 		change_curseg(sbi, type);
 	else
 		new_curseg(sbi, type, false);
@@ -2333,7 +2325,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
 	stat_inc_seg_type(sbi, curseg);
 }
 
-void allocate_new_segments(struct f2fs_sb_info *sbi)
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
 {
 	struct curseg_info *curseg;
 	unsigned int old_segno;
@@ -2355,7 +2347,8 @@ static const struct segment_allocation default_salloc_ops = {
 	.allocate_segment = allocate_segment_by_default,
 };
 
-bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
+						struct cp_control *cpc)
 {
 	__u64 trim_start = cpc->trim_start;
 	bool has_candidate = false;
@@ -2373,11 +2366,72 @@ bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	return has_candidate;
 }
 
+static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+					struct discard_policy *dpolicy,
+					unsigned int start, unsigned int end)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	struct discard_cmd *dc;
+	struct blk_plug plug;
+	int issued;
+
+next:
+	issued = 0;
+
+	mutex_lock(&dcc->cmd_lock);
+	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+
+	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
+					NULL, start,
+					(struct rb_entry **)&prev_dc,
+					(struct rb_entry **)&next_dc,
+					&insert_p, &insert_parent, true);
+	if (!dc)
+		dc = next_dc;
+
+	blk_start_plug(&plug);
+
+	while (dc && dc->lstart <= end) {
+		struct rb_node *node;
+
+		if (dc->len < dpolicy->granularity)
+			goto skip;
+
+		if (dc->state != D_PREP) {
+			list_move_tail(&dc->list, &dcc->fstrim_list);
+			goto skip;
+		}
+
+		__submit_discard_cmd(sbi, dpolicy, dc);
+
+		if (++issued >= dpolicy->max_requests) {
+			start = dc->lstart + dc->len;
+
+			blk_finish_plug(&plug);
+			mutex_unlock(&dcc->cmd_lock);
+			__wait_all_discard_cmd(sbi, NULL);
+			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			goto next;
+		}
+skip:
+		node = rb_next(&dc->rb_node);
+		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+
+		if (fatal_signal_pending(current))
+			break;
+	}
+
+	blk_finish_plug(&plug);
+	mutex_unlock(&dcc->cmd_lock);
+}
+
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 {
 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
-	unsigned int start_segno, end_segno, cur_segno;
+	unsigned int start_segno, end_segno;
 	block_t start_block, end_block;
 	struct cp_control cpc;
 	struct discard_policy dpolicy;
@@ -2388,12 +2442,12 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 		return -EINVAL;
 
 	if (end <= MAIN_BLKADDR(sbi))
-		goto out;
+		return -EINVAL;
 
 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"Found FS corruption, run fsck to fix.");
-		goto out;
+		return -EIO;
 	}
 
 	/* start/end segment number in main_area */
@@ -2403,40 +2457,36 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 
 	cpc.reason = CP_DISCARD;
 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
+	cpc.trim_start = start_segno;
+	cpc.trim_end = end_segno;
 
-	/* do checkpoint to issue discard commands safely */
-	for (cur_segno = start_segno; cur_segno <= end_segno;
-					cur_segno = cpc.trim_end + 1) {
-		cpc.trim_start = cur_segno;
-
-		if (sbi->discard_blks == 0)
-			break;
-		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
-			cpc.trim_end = end_segno;
-		else
-			cpc.trim_end = min_t(unsigned int,
-				rounddown(cur_segno +
-				BATCHED_TRIM_SEGMENTS(sbi),
-				sbi->segs_per_sec) - 1, end_segno);
-
-		mutex_lock(&sbi->gc_mutex);
-		err = write_checkpoint(sbi, &cpc);
-		mutex_unlock(&sbi->gc_mutex);
-		if (err)
-			break;
+	if (sbi->discard_blks == 0)
+		goto out;
 
-		schedule();
-	}
+	mutex_lock(&sbi->gc_mutex);
+	err = f2fs_write_checkpoint(sbi, &cpc);
+	mutex_unlock(&sbi->gc_mutex);
+	if (err)
+		goto out;
 
 	start_block = START_BLOCK(sbi, start_segno);
-	end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1);
+	end_block = START_BLOCK(sbi, end_segno + 1);
 
-	init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
+	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
 	__issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
-	trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
+
+	/*
+	 * We filed discard candidates, but actually we don't need to wait for
+	 * all of them, since they'll be issued in idle time along with runtime
+	 * discard option. User configuration looks like using runtime discard
+	 * or periodic fstrim instead of it.
+	 */
+	if (!test_opt(sbi, DISCARD)) {
+		trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
 					start_block, end_block);
+		range->len = F2FS_BLK_TO_BYTES(trimmed);
+	}
out:
-	range->len = F2FS_BLK_TO_BYTES(trimmed);
 	return err;
 }
@@ -2448,7 +2498,7 @@ static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
 	return false;
 }
 
-int rw_hint_to_seg_type(enum rw_hint hint)
+int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
 {
 	switch (hint) {
 	case WRITE_LIFE_SHORT:
@@ -2521,7 +2571,7 @@ int rw_hint_to_seg_type(enum rw_hint hint)
 * WRITE_LIFE_LONG       "                WRITE_LIFE_LONG
 */
 
-enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
 				enum page_type type, enum temp_type temp)
 {
 	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
@@ -2588,9 +2638,11 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
 		if (is_cold_data(fio->page) || file_is_cold(inode))
 			return CURSEG_COLD_DATA;
 		if (file_is_hot(inode) ||
-				is_inode_flag_set(inode, FI_HOT_DATA))
+				is_inode_flag_set(inode, FI_HOT_DATA) ||
+				is_inode_flag_set(inode, FI_ATOMIC_FILE) ||
+				is_inode_flag_set(inode, FI_VOLATILE_FILE))
 			return CURSEG_HOT_DATA;
-		return rw_hint_to_seg_type(inode->i_write_hint);
+		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
 	} else {
 		if (IS_DNODE(fio->page))
 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
@@ -2626,7 +2678,7 @@ static int __get_segment_type(struct f2fs_io_info *fio)
 	return type;
 }
 
-void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 		block_t old_blkaddr, block_t *new_blkaddr,
 		struct f2fs_summary *sum, int type,
 		struct f2fs_io_info *fio, bool add_list)
@@ -2686,6 +2738,7 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 
 		INIT_LIST_HEAD(&fio->list);
 		fio->in_list = true;
+		fio->retry = false;
 		io = sbi->write_io[fio->type] + fio->temp;
 		spin_lock(&io->io_lock);
 		list_add_tail(&fio->list, &io->io_list);
@@ -2708,7 +2761,7 @@ static void update_device_state(struct f2fs_io_info *fio)
 	devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
 
 	/* update device state for fsync */
-	set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
+	f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
 
 	/* update device state for checkpoint */
 	if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
@@ -2721,23 +2774,28 @@ static void update_device_state(struct f2fs_io_info *fio)
 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 {
 	int type = __get_segment_type(fio);
-	int err;
+	bool keep_order = (test_opt(fio->sbi, LFS) && type == CURSEG_COLD_DATA);
 
+	if (keep_order)
+		down_read(&fio->sbi->io_order_lock);
reallocate:
-	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
+	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
 			&fio->new_blkaddr, sum, type, fio, true);
 
 	/* writeout dirty page into bdev */
-	err = f2fs_submit_page_write(fio);
-	if (err == -EAGAIN) {
+	f2fs_submit_page_write(fio);
+	if (fio->retry) {
 		fio->old_blkaddr = fio->new_blkaddr;
 		goto reallocate;
-	} else if (!err) {
-		update_device_state(fio);
 	}
+
+	update_device_state(fio);
+
+	if (keep_order)
+		up_read(&fio->sbi->io_order_lock);
 }
 
-void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
 					enum iostat_type io_type)
 {
 	struct f2fs_io_info fio = {
@@ -2757,12 +2815,13 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
 		fio.op_flags &= ~REQ_META;
 
 	set_page_writeback(page);
+	ClearPageError(page);
 	f2fs_submit_page_write(&fio);
 
 	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
 }
 
-void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
+void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
 {
 	struct f2fs_summary sum;
 
@@ -2772,14 +2831,15 @@ void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
 	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
 }
 
-void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
+void f2fs_outplace_write_data(struct dnode_of_data *dn,
+					struct f2fs_io_info *fio)
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
 	struct f2fs_summary sum;
 	struct node_info ni;
 
 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
-	get_node_info(sbi, dn->nid, &ni);
+	f2fs_get_node_info(sbi, dn->nid, &ni);
 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
 	do_write_page(&sum, fio);
 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
@@ -2787,7 +2847,7 @@ void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
 	f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
 }
 
-int rewrite_data_page(struct f2fs_io_info *fio)
+int f2fs_inplace_write_data(struct f2fs_io_info *fio)
 {
 	int err;
 	struct f2fs_sb_info *sbi = fio->sbi;
@@ -2822,7 +2882,7 @@ static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
 	return i;
 }
 
-void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 				block_t old_blkaddr, block_t new_blkaddr,
 				bool recover_curseg, bool recover_newaddr)
 {
@@ -2907,7 +2967,7 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
 
 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
 
-	__f2fs_replace_block(sbi, &sum, old_addr, new_addr,
+	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
 					recover_curseg, recover_newaddr);
 
 	f2fs_update_data_blkaddr(dn, new_addr);
@@ -2932,7 +2992,7 @@ void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
 {
 	struct page *cpage;
 
-	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+	if (!is_valid_blkaddr(blkaddr))
 		return;
 
 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
@@ -2953,7 +3013,7 @@ static void read_compacted_summaries(struct f2fs_sb_info *sbi)
 
 	start = start_sum_block(sbi);
 
-	page = get_meta_page(sbi, start++);
+	page = f2fs_get_meta_page(sbi, start++);
 	kaddr = (unsigned char *)page_address(page);
 
 	/* Step 1: restore nat cache */
@@ -2993,7 +3053,7 @@ static void read_compacted_summaries(struct f2fs_sb_info *sbi)
 			f2fs_put_page(page, 1);
 			page = NULL;
 
-			page = get_meta_page(sbi, start++);
+			page = f2fs_get_meta_page(sbi, start++);
 			kaddr = (unsigned char *)page_address(page);
 			offset = 0;
 		}
@@ -3032,7 +3092,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
 			blk_addr = GET_SUM_BLOCK(sbi, segno);
 	}
 
-	new = get_meta_page(sbi, blk_addr);
+	new = f2fs_get_meta_page(sbi, blk_addr);
 	sum = (struct f2fs_summary_block *)page_address(new);
 
 	if (IS_NODESEG(type)) {
@@ -3044,7 +3104,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
 				ns->ofs_in_node = 0;
 			}
 		} else {
-			restore_node_summary(sbi, segno, sum);
+			f2fs_restore_node_summary(sbi, segno, sum);
 		}
 	}
 
@@ -3076,10 +3136,10 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
 	int err;
 
 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
-		int npages = npages_for_summary_flush(sbi, true);
+		int npages = f2fs_npages_for_summary_flush(sbi, true);
 
 		if (npages >= 2)
-			ra_meta_pages(sbi, start_sum_block(sbi), npages,
+			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
 							META_CP, true);
 
 		/* restore for compacted data summary */
@@ -3088,7 +3148,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
 	}
 
 	if (__exist_node_summaries(sbi))
-		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
+		f2fs_ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
 					NR_CURSEG_TYPE - type, META_CP, true);
 
 	for (; type <= CURSEG_COLD_NODE; type++) {
@@ -3114,8 +3174,9 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
 	int written_size = 0;
 	int i, j;
 
-	page = grab_meta_page(sbi, blkaddr++);
+	page = f2fs_grab_meta_page(sbi, blkaddr++);
 	kaddr = (unsigned char *)page_address(page);
+	memset(kaddr, 0, PAGE_SIZE);
 
 	/* Step 1: write nat cache */
 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -3138,8 +3199,9 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
 
 		for (j = 0; j < blkoff; j++) {
 			if (!page) {
-				page = grab_meta_page(sbi, blkaddr++);
+				page = f2fs_grab_meta_page(sbi, blkaddr++);
 				kaddr = (unsigned char *)page_address(page);
+				memset(kaddr, 0, PAGE_SIZE);
 				written_size = 0;
 			}
 			summary = (struct f2fs_summary *)(kaddr + written_size);
@@ -3174,7 +3236,7 @@ static void write_normal_summaries(struct f2fs_sb_info *sbi,
 		write_current_sum_page(sbi, i, blkaddr + (i - type));
 }
 
-void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
+void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 {
 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
 		write_compacted_summaries(sbi, start_blk);
@@ -3182,12 +3244,12 @@ void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
 }
 
-void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
+void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 {
 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
 }
 
-int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
+int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
 					unsigned int val, int alloc)
 {
 	int i;
@@ -3212,7 +3274,7 @@ int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
 					unsigned int segno)
 {
-	return get_meta_page(sbi, current_sit_addr(sbi, segno));
+	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
 }
 
 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
@@ -3225,7 +3287,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
 	src_off = current_sit_addr(sbi, start);
 	dst_off = next_sit_addr(sbi, src_off);
 
-	page = grab_meta_page(sbi, dst_off);
+	page = f2fs_grab_meta_page(sbi, dst_off);
 	seg_info_to_sit_page(sbi, page, start);
 
 	set_page_dirty(page);
@@ -3321,7 +3383,7 @@ static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
-void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
 	struct sit_info *sit_i = SIT_I(sbi);
 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
@@ -3380,6 +3442,11 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 			int offset, sit_offset;
 
 			se = get_seg_entry(sbi, segno);
+#ifdef CONFIG_F2FS_CHECK_FS
+			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
+						SIT_VBLOCK_MAP_SIZE))
+				f2fs_bug_on(sbi, 1);
+#endif
 
 			/* add discard candidates */
 			if (!(cpc->reason & CP_DISCARD)) {
@@ -3388,17 +3455,21 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 			}
 
 			if (to_journal) {
-				offset = lookup_journal_in_cursum(journal,
+				offset = f2fs_lookup_journal_in_cursum(journal,
 							SIT_JOURNAL, segno, 1);
 				f2fs_bug_on(sbi, offset < 0);
 				segno_in_journal(journal, offset) =
 							cpu_to_le32(segno);
 				seg_info_to_raw_sit(se,
 					&sit_in_journal(journal, offset));
+				check_block_count(sbi, segno,
+					&sit_in_journal(journal, offset));
 			} else {
 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
 				seg_info_to_raw_sit(se,
 						&raw_sit->entries[sit_offset]);
+				check_block_count(sbi, segno,
+						&raw_sit->entries[sit_offset]);
 			}
 
 			__clear_bit(segno, bitmap);
@@ -3597,9 +3668,10 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
 	unsigned int i, start, end;
 	unsigned int readed, start_blk = 0;
 	int err = 0;
+	block_t total_node_blocks = 0;
 
 	do {
-		readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
+		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
 							META_SIT, true);
 
 		start = start_blk * sit_i->sents_per_block;
@@ -3619,6 +3691,8 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
 			if (err)
 				return err;
 			seg_info_from_raw_sit(se, &sit);
+			if (IS_NODESEG(se->type))
+				total_node_blocks += se->valid_blocks;
 
 			/* build discard map only one time */
 			if (f2fs_discard_en(sbi)) {
@@ -3647,15 +3721,28 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
 		unsigned int old_valid_blocks;
 
 		start = le32_to_cpu(segno_in_journal(journal, i));
+		if (start >= MAIN_SEGS(sbi)) {
+			f2fs_msg(sbi->sb, KERN_ERR,
+					"Wrong journal entry on segno %u",
+					start);
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			err = -EINVAL;
+			break;
+		}
+
 		se = &sit_i->sentries[start];
 		sit = sit_in_journal(journal, i);
 
 		old_valid_blocks = se->valid_blocks;
+		if (IS_NODESEG(se->type))
+			total_node_blocks -= old_valid_blocks;
 
 		err = check_block_count(sbi, start, &sit);
 		if (err)
 			break;
 		seg_info_from_raw_sit(se, &sit);
+		if (IS_NODESEG(se->type))
+			total_node_blocks += se->valid_blocks;
 
 		if (f2fs_discard_en(sbi)) {
 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
@@ -3664,16 +3751,28 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
 			} else {
 				memcpy(se->discard_map, se->cur_valid_map,
 							SIT_VBLOCK_MAP_SIZE);
-				sbi->discard_blks += old_valid_blocks -
-							se->valid_blocks;
+				sbi->discard_blks += old_valid_blocks;
+				sbi->discard_blks -= se->valid_blocks;
 			}
 		}
 
-		if (sbi->segs_per_sec > 1)
+		if (sbi->segs_per_sec > 1) {
 			get_sec_entry(sbi, start)->valid_blocks +=
-					se->valid_blocks - old_valid_blocks;
+							se->valid_blocks;
+			get_sec_entry(sbi, start)->valid_blocks -=
+							old_valid_blocks;
+		}
 	}
 	up_read(&curseg->journal_rwsem);
+
+	if (!err && total_node_blocks != valid_node_count(sbi)) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"SIT is corrupted node# %u vs %u",
+			total_node_blocks, valid_node_count(sbi));
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		err = -EINVAL;
+	}
+
 	return err;
 }
@@ -3772,7 +3871,7 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi)
 
 	down_write(&sit_i->sentry_lock);
 
-	sit_i->min_mtime = LLONG_MAX;
+	sit_i->min_mtime = ULLONG_MAX;
 
 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
 		unsigned int i;
@@ -3786,11 +3885,11 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi)
 		if (sit_i->min_mtime > mtime)
 			sit_i->min_mtime = mtime;
 	}
-	sit_i->max_mtime = get_mtime(sbi);
+	sit_i->max_mtime = get_mtime(sbi, false);
 	up_write(&sit_i->sentry_lock);
 }
 
-int build_segment_manager(struct f2fs_sb_info *sbi)
+int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -3822,14 +3921,12 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
 	sm_info->min_ssr_sections = reserved_sections(sbi);
 
-	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
-
 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
 
 	init_rwsem(&sm_info->curseg_lock);
 
 	if (!f2fs_readonly(sbi->sb)) {
-		err = create_flush_cmd_control(sbi);
+		err = f2fs_create_flush_cmd_control(sbi);
 		if (err)
 			return err;
 	}
@@ -3954,13 +4051,13 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
 	kfree(sit_i);
 }
 
-void destroy_segment_manager(struct f2fs_sb_info *sbi)
+void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_sm_info *sm_info = SM_I(sbi);
 
 	if (!sm_info)
 		return;
-	destroy_flush_cmd_control(sbi, true);
+	f2fs_destroy_flush_cmd_control(sbi, true);
 	destroy_discard_cmd_control(sbi);
 	destroy_dirty_segmap(sbi);
 	destroy_curseg(sbi);
@@ -3970,7 +4067,7 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi)
 	kfree(sm_info);
 }
 
-int __init create_segment_manager_caches(void)
+int __init f2fs_create_segment_manager_caches(void)
 {
 	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
 			sizeof(struct discard_entry));
@@ -4003,7 +4100,7 @@ fail:
 	return -ENOMEM;
 }
 
-void destroy_segment_manager_caches(void)
+void f2fs_destroy_segment_manager_caches(void)
 {
 	kmem_cache_destroy(sit_entry_set_slab);
 	kmem_cache_destroy(discard_cmd_slab);