@@ -250,6 +250,36 @@ void drop_inmem_pages(struct inode *inode)
 	stat_dec_atomic_write(inode);
 }
 
+void drop_inmem_page(struct inode *inode, struct page *page)
+{
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct list_head *head = &fi->inmem_pages;
+	struct inmem_pages *cur = NULL;
+
+	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
+
+	mutex_lock(&fi->inmem_lock);
+	list_for_each_entry(cur, head, list) {
+		if (cur->page == page)
+			break;
+	}
+
+	f2fs_bug_on(sbi, !cur || cur->page != page);
+	list_del(&cur->list);
+	mutex_unlock(&fi->inmem_lock);
+
+	dec_page_count(sbi, F2FS_INMEM_PAGES);
+	kmem_cache_free(inmem_entry_slab, cur);
+
+	ClearPageUptodate(page);
+	set_page_private(page, 0);
+	ClearPagePrivate(page);
+	f2fs_put_page(page, 0);
+
+	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
+}
+
 static int __commit_inmem_pages(struct inode *inode,
 					struct list_head *revoke_list)
 {
@@ -261,7 +291,6 @@ static int __commit_inmem_pages(struct inode *inode,
 		.type = DATA,
 		.op = REQ_OP_WRITE,
 		.op_flags = REQ_SYNC | REQ_PRIO,
-		.encrypted_page = NULL,
 	};
 	pgoff_t last_idx = ULONG_MAX;
 	int err = 0;
@@ -281,6 +310,9 @@ static int __commit_inmem_pages(struct inode *inode,
 			}
 
 			fio.page = page;
+			fio.old_blkaddr = NULL_ADDR;
+			fio.encrypted_page = NULL;
+			fio.need_lock = false;
 			err = do_write_data_page(&fio);
 			if (err) {
 				unlock_page(page);
@@ -358,11 +390,8 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 	}
 #endif
 
-	if (!need)
-		return;
-
 	/* balance_fs_bg is able to be pending */
-	if (excess_cached_nats(sbi))
+	if (need && excess_cached_nats(sbi))
 		f2fs_balance_fs_bg(sbi);
 
 	/*
@@ -371,7 +400,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 	 */
 	if (has_not_enough_free_secs(sbi, 0, 0)) {
 		mutex_lock(&sbi->gc_mutex);
-		f2fs_gc(sbi, false, false);
+		f2fs_gc(sbi, false, false, NULL_SEGNO);
 	}
 }
 
@@ -390,7 +419,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 	else
 		build_free_nids(sbi, false, false);
 
-	if (!is_idle(sbi))
+	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
 		return;
 
 	/* checkpoint is the only way to shrink partial cached entries */
@@ -411,32 +440,34 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 	}
 }
 
-static int __submit_flush_wait(struct block_device *bdev)
+static int __submit_flush_wait(struct f2fs_sb_info *sbi,
+				struct block_device *bdev)
 {
 	struct bio *bio = f2fs_bio_alloc(0);
 	int ret;
 
-	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
 	bio->bi_bdev = bdev;
 	ret = submit_bio_wait(bio);
 	bio_put(bio);
+
+	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
+				test_opt(sbi, FLUSH_MERGE), ret);
 	return ret;
 }
 
 static int submit_flush_wait(struct f2fs_sb_info *sbi)
 {
-	int ret = __submit_flush_wait(sbi->sb->s_bdev);
+	int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
 	int i;
 
-	if (sbi->s_ndevs && !ret) {
-		for (i = 1; i < sbi->s_ndevs; i++) {
-			trace_f2fs_issue_flush(FDEV(i).bdev,
-					test_opt(sbi, NOBARRIER),
-					test_opt(sbi, FLUSH_MERGE));
-			ret = __submit_flush_wait(FDEV(i).bdev);
-			if (ret)
-				break;
-		}
+	if (!sbi->s_ndevs || ret)
+		return ret;
+
+	for (i = 1; i < sbi->s_ndevs; i++) {
+		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
+		if (ret)
+			break;
 	}
 	return ret;
 }
@@ -458,6 +489,8 @@ repeat:
 	fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 
 	ret = submit_flush_wait(sbi);
+	atomic_inc(&fcc->issued_flush);
+
 	llist_for_each_entry_safe(cmd, next,
 				fcc->dispatch_list, llnode) {
 		cmd->ret = ret;
@@ -475,25 +508,29 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 {
 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
 	struct flush_cmd cmd;
+	int ret;
 
 	if (test_opt(sbi, NOBARRIER))
 		return 0;
 
-	if (!test_opt(sbi, FLUSH_MERGE))
-		return submit_flush_wait(sbi);
-
-	if (!atomic_read(&fcc->submit_flush)) {
-		int ret;
+	if (!test_opt(sbi, FLUSH_MERGE)) {
+		ret = submit_flush_wait(sbi);
+		atomic_inc(&fcc->issued_flush);
+		return ret;
+	}
 
-		atomic_inc(&fcc->submit_flush);
+	if (!atomic_read(&fcc->issing_flush)) {
+		atomic_inc(&fcc->issing_flush);
 		ret = submit_flush_wait(sbi);
-		atomic_dec(&fcc->submit_flush);
+		atomic_dec(&fcc->issing_flush);
+
+		atomic_inc(&fcc->issued_flush);
 		return ret;
 	}
 
 	init_completion(&cmd.wait);
 
-	atomic_inc(&fcc->submit_flush);
+	atomic_inc(&fcc->issing_flush);
 	llist_add(&cmd.llnode, &fcc->issue_list);
 
 	if (!fcc->dispatch_list)
@@ -501,10 +538,10 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 
 	if (fcc->f2fs_issue_flush) {
 		wait_for_completion(&cmd.wait);
-		atomic_dec(&fcc->submit_flush);
+		atomic_dec(&fcc->issing_flush);
 	} else {
 		llist_del_all(&fcc->issue_list);
-		atomic_set(&fcc->submit_flush, 0);
+		atomic_set(&fcc->issing_flush, 0);
 	}
 
 	return cmd.ret;
@@ -524,7 +561,8 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
 	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
 	if (!fcc)
 		return -ENOMEM;
-	atomic_set(&fcc->submit_flush, 0);
+	atomic_set(&fcc->issued_flush, 0);
+	atomic_set(&fcc->issing_flush, 0);
 	init_waitqueue_head(&fcc->flush_wait_queue);
 	init_llist_head(&fcc->issue_list);
 	SM_I(sbi)->fcc_info = fcc;
@@ -597,8 +635,8 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
 			dirty_i->nr_dirty[t]--;
 
-		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
-			clear_bit(GET_SECNO(sbi, segno),
+		if (get_valid_blocks(sbi, segno, true) == 0)
+			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
 						dirty_i->victim_secmap);
 	}
 }
@@ -618,7 +656,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
 
 	mutex_lock(&dirty_i->seglist_lock);
 
-	valid_blocks = get_valid_blocks(sbi, segno, 0);
+	valid_blocks = get_valid_blocks(sbi, segno, false);
 
 	if (valid_blocks == 0) {
 		__locate_dirty_segment(sbi, segno, PRE);
@@ -633,162 +671,407 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
 	mutex_unlock(&dirty_i->seglist_lock);
 }
 
-static void __add_discard_cmd(struct f2fs_sb_info *sbi,
-			struct bio *bio, block_t lstart, block_t len)
+static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
+			struct block_device *bdev, block_t lstart,
+			block_t start, block_t len)
 {
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
-	struct list_head *cmd_list = &(dcc->discard_cmd_list);
+	struct list_head *pend_list;
 	struct discard_cmd *dc;
 
+	f2fs_bug_on(sbi, !len);
+
+	pend_list = &dcc->pend_list[plist_idx(len)];
+
 	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
 	INIT_LIST_HEAD(&dc->list);
-	dc->bio = bio;
-	bio->bi_private = dc;
+	dc->bdev = bdev;
 	dc->lstart = lstart;
+	dc->start = start;
 	dc->len = len;
+	dc->ref = 0;
 	dc->state = D_PREP;
+	dc->error = 0;
 	init_completion(&dc->wait);
+	list_add_tail(&dc->list, pend_list);
+	atomic_inc(&dcc->discard_cmd_cnt);
+	dcc->undiscard_blks += len;
 
-	mutex_lock(&dcc->cmd_lock);
-	list_add_tail(&dc->list, cmd_list);
-	mutex_unlock(&dcc->cmd_lock);
+	return dc;
 }
 
-static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc)
+static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
+				struct block_device *bdev, block_t lstart,
+				block_t start, block_t len,
+				struct rb_node *parent, struct rb_node **p)
 {
-	int err = dc->bio->bi_error;
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct discard_cmd *dc;
 
-	if (dc->state == D_DONE)
-		atomic_dec(&(SM_I(sbi)->dcc_info->submit_discard));
+	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
 
-	if (err == -EOPNOTSUPP)
-		err = 0;
+	rb_link_node(&dc->rb_node, parent, p);
+	rb_insert_color(&dc->rb_node, &dcc->root);
+
+	return dc;
+}
+
+static void __detach_discard_cmd(struct discard_cmd_control *dcc,
+							struct discard_cmd *dc)
+{
+	if (dc->state == D_DONE)
+		atomic_dec(&dcc->issing_discard);
 
-	if (err)
-		f2fs_msg(sbi->sb, KERN_INFO,
-			"Issue discard failed, ret: %d", err);
-	bio_put(dc->bio);
 	list_del(&dc->list);
+	rb_erase(&dc->rb_node, &dcc->root);
+	dcc->undiscard_blks -= dc->len;
+
 	kmem_cache_free(discard_cmd_slab, dc);
+
+	atomic_dec(&dcc->discard_cmd_cnt);
 }
 
-/* This should be covered by global mutex, &sit_i->sentry_lock */
-void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
+static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
+							struct discard_cmd *dc)
 {
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
-	struct list_head *wait_list = &(dcc->discard_cmd_list);
-	struct discard_cmd *dc, *tmp;
-	struct blk_plug plug;
 
-	mutex_lock(&dcc->cmd_lock);
+	if (dc->error == -EOPNOTSUPP)
+		dc->error = 0;
 
-	blk_start_plug(&plug);
+	if (dc->error)
+		f2fs_msg(sbi->sb, KERN_INFO,
+			"Issue discard failed, ret: %d", dc->error);
+	__detach_discard_cmd(dcc, dc);
+}
 
-	list_for_each_entry_safe(dc, tmp, wait_list, list) {
+static void f2fs_submit_discard_endio(struct bio *bio)
+{
+	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
 
-		if (blkaddr == NULL_ADDR) {
-			if (dc->state == D_PREP) {
-				dc->state = D_SUBMIT;
-				submit_bio(dc->bio);
-				atomic_inc(&dcc->submit_discard);
-			}
-			continue;
+	dc->error = bio->bi_error;
+	dc->state = D_DONE;
+	complete(&dc->wait);
+	bio_put(bio);
+}
+
+/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
+static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
+						struct discard_cmd *dc)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct bio *bio = NULL;
+
+	if (dc->state != D_PREP)
+		return;
+
+	trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
+
+	dc->error = __blkdev_issue_discard(dc->bdev,
+				SECTOR_FROM_BLOCK(dc->start),
+				SECTOR_FROM_BLOCK(dc->len),
+				GFP_NOFS, 0, &bio);
+	if (!dc->error) {
+		/* should keep before submission to avoid D_DONE right away */
+		dc->state = D_SUBMIT;
+		atomic_inc(&dcc->issued_discard);
+		atomic_inc(&dcc->issing_discard);
+		if (bio) {
+			bio->bi_private = dc;
+			bio->bi_end_io = f2fs_submit_discard_endio;
+			bio->bi_opf |= REQ_SYNC;
+			submit_bio(bio);
+			list_move_tail(&dc->list, &dcc->wait_list);
 		}
+	} else {
+		__remove_discard_cmd(sbi, dc);
+	}
+}
 
-		if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len) {
-			if (dc->state == D_SUBMIT)
-				wait_for_completion_io(&dc->wait);
-			else
-				__remove_discard_cmd(sbi, dc);
+static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
+				struct block_device *bdev, block_t lstart,
+				block_t start, block_t len,
+				struct rb_node **insert_p,
+				struct rb_node *insert_parent)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct rb_node **p = &dcc->root.rb_node;
+	struct rb_node *parent = NULL;
+	struct discard_cmd *dc = NULL;
+
+	if (insert_p && insert_parent) {
+		parent = insert_parent;
+		p = insert_p;
+		goto do_insert;
+	}
+
+	p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
+do_insert:
+	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
+	if (!dc)
+		return NULL;
+
+	return dc;
+}
+
+static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
+						struct discard_cmd *dc)
+{
+	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
+}
+
+static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
+				struct discard_cmd *dc, block_t blkaddr)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct discard_info di = dc->di;
+	bool modified = false;
+
+	if (dc->state == D_DONE || dc->len == 1) {
+		__remove_discard_cmd(sbi, dc);
+		return;
+	}
+
+	dcc->undiscard_blks -= di.len;
+
+	if (blkaddr > di.lstart) {
+		dc->len = blkaddr - dc->lstart;
+		dcc->undiscard_blks += dc->len;
+		__relocate_discard_cmd(dcc, dc);
+		f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
+		modified = true;
+	}
+
+	if (blkaddr < di.lstart + di.len - 1) {
+		if (modified) {
+			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
+					di.start + blkaddr + 1 - di.lstart,
+					di.lstart + di.len - 1 - blkaddr,
+					NULL, NULL);
+			f2fs_bug_on(sbi,
+				!__check_rb_tree_consistence(sbi, &dcc->root));
+		} else {
+			dc->lstart++;
+			dc->len--;
+			dc->start++;
+			dcc->undiscard_blks += dc->len;
+			__relocate_discard_cmd(dcc, dc);
+			f2fs_bug_on(sbi,
+				!__check_rb_tree_consistence(sbi, &dcc->root));
 		}
 	}
-	blk_finish_plug(&plug);
+}
 
-	/* this comes from f2fs_put_super */
-	if (blkaddr == NULL_ADDR) {
-		list_for_each_entry_safe(dc, tmp, wait_list, list) {
-			wait_for_completion_io(&dc->wait);
-			__remove_discard_cmd(sbi, dc);
+static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
+				struct block_device *bdev, block_t lstart,
+				block_t start, block_t len)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+	struct discard_cmd *dc;
+	struct discard_info di = {0};
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	block_t end = lstart + len;
+
+	mutex_lock(&dcc->cmd_lock);
+
+	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
+					NULL, lstart,
+					(struct rb_entry **)&prev_dc,
+					(struct rb_entry **)&next_dc,
+					&insert_p, &insert_parent, true);
+	if (dc)
+		prev_dc = dc;
+
+	if (!prev_dc) {
+		di.lstart = lstart;
+		di.len = next_dc ? next_dc->lstart - lstart : len;
+		di.len = min(di.len, len);
+		di.start = start;
+	}
+
+	while (1) {
+		struct rb_node *node;
+		bool merged = false;
+		struct discard_cmd *tdc = NULL;
+
+		if (prev_dc) {
+			di.lstart = prev_dc->lstart + prev_dc->len;
+			if (di.lstart < lstart)
+				di.lstart = lstart;
+			if (di.lstart >= end)
+				break;
+
+			if (!next_dc || next_dc->lstart > end)
+				di.len = end - di.lstart;
+			else
+				di.len = next_dc->lstart - di.lstart;
+			di.start = start + di.lstart - lstart;
+		}
+
+		if (!di.len)
+			goto next;
+
+		if (prev_dc && prev_dc->state == D_PREP &&
+			prev_dc->bdev == bdev &&
+			__is_discard_back_mergeable(&di, &prev_dc->di)) {
+			prev_dc->di.len += di.len;
+			dcc->undiscard_blks += di.len;
+			__relocate_discard_cmd(dcc, prev_dc);
+			f2fs_bug_on(sbi,
+				!__check_rb_tree_consistence(sbi, &dcc->root));
+			di = prev_dc->di;
+			tdc = prev_dc;
+			merged = true;
+		}
+
+		if (next_dc && next_dc->state == D_PREP &&
+			next_dc->bdev == bdev &&
+			__is_discard_front_mergeable(&di, &next_dc->di)) {
+			next_dc->di.lstart = di.lstart;
+			next_dc->di.len += di.len;
+			next_dc->di.start = di.start;
+			dcc->undiscard_blks += di.len;
+			__relocate_discard_cmd(dcc, next_dc);
+			if (tdc)
+				__remove_discard_cmd(sbi, tdc);
+			f2fs_bug_on(sbi,
+				!__check_rb_tree_consistence(sbi, &dcc->root));
+			merged = true;
+		}
+
+		if (!merged) {
+			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
+							di.len, NULL, NULL);
+			f2fs_bug_on(sbi,
+				!__check_rb_tree_consistence(sbi, &dcc->root));
 		}
+ next:
+		prev_dc = next_dc;
+		if (!prev_dc)
+			break;
+
+		node = rb_next(&prev_dc->rb_node);
+		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
 	}
+
 	mutex_unlock(&dcc->cmd_lock);
 }
 
-static void f2fs_submit_discard_endio(struct bio *bio)
+static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
+		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
-	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
+	block_t lblkstart = blkstart;
 
-	complete(&dc->wait);
-	dc->state = D_DONE;
+	trace_f2fs_queue_discard(bdev, blkstart, blklen);
+
+	if (sbi->s_ndevs) {
+		int devi = f2fs_target_device_index(sbi, blkstart);
+
+		blkstart -= FDEV(devi).start_blk;
+	}
+	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
+	return 0;
 }
 
-static int issue_discard_thread(void *data)
+static void __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
 {
-	struct f2fs_sb_info *sbi = data;
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
-	wait_queue_head_t *q = &dcc->discard_wait_queue;
-	struct list_head *cmd_list = &dcc->discard_cmd_list;
+	struct list_head *pend_list;
 	struct discard_cmd *dc, *tmp;
 	struct blk_plug plug;
-	int iter = 0;
-repeat:
-	if (kthread_should_stop())
-		return 0;
+	int i, iter = 0;
 
+	mutex_lock(&dcc->cmd_lock);
 	blk_start_plug(&plug);
+	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
+		pend_list = &dcc->pend_list[i];
+		list_for_each_entry_safe(dc, tmp, pend_list, list) {
+			f2fs_bug_on(sbi, dc->state != D_PREP);
+
+			if (!issue_cond || is_idle(sbi))
+				__submit_discard_cmd(sbi, dc);
+			if (issue_cond && iter++ > DISCARD_ISSUE_RATE)
+				goto out;
+		}
+	}
+out:
+	blk_finish_plug(&plug);
+	mutex_unlock(&dcc->cmd_lock);
+}
+
+static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct list_head *wait_list = &(dcc->wait_list);
+	struct discard_cmd *dc, *tmp;
 
 	mutex_lock(&dcc->cmd_lock);
-	list_for_each_entry_safe(dc, tmp, cmd_list, list) {
-		if (dc->state == D_PREP) {
-			dc->state = D_SUBMIT;
-			submit_bio(dc->bio);
-			atomic_inc(&dcc->submit_discard);
-			if (iter++ > DISCARD_ISSUE_RATE)
-				break;
-		} else if (dc->state == D_DONE) {
+	list_for_each_entry_safe(dc, tmp, wait_list, list) {
+		if (!wait_cond || dc->state == D_DONE) {
+			if (dc->ref)
+				continue;
+			wait_for_completion_io(&dc->wait);
 			__remove_discard_cmd(sbi, dc);
 		}
 	}
 	mutex_unlock(&dcc->cmd_lock);
+}
 
-	blk_finish_plug(&plug);
+/* This should be covered by global mutex, &sit_i->sentry_lock */
+void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct discard_cmd *dc;
+	bool need_wait = false;
 
-	iter = 0;
-	congestion_wait(BLK_RW_SYNC, HZ/50);
+	mutex_lock(&dcc->cmd_lock);
+	dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
+	if (dc) {
+		if (dc->state == D_PREP) {
+			__punch_discard_cmd(sbi, dc, blkaddr);
+		} else {
+			dc->ref++;
+			need_wait = true;
+		}
+	}
+	mutex_unlock(&dcc->cmd_lock);
 
-	wait_event_interruptible(*q,
-		kthread_should_stop() || !list_empty(&dcc->discard_cmd_list));
-	goto repeat;
+	if (need_wait) {
+		wait_for_completion_io(&dc->wait);
+		mutex_lock(&dcc->cmd_lock);
+		f2fs_bug_on(sbi, dc->state != D_DONE);
+		dc->ref--;
+		if (!dc->ref)
+			__remove_discard_cmd(sbi, dc);
+		mutex_unlock(&dcc->cmd_lock);
+	}
 }
 
-
-/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
-static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi,
-		struct block_device *bdev, block_t blkstart, block_t blklen)
+/* This comes from f2fs_put_super */
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
 {
-	struct bio *bio = NULL;
-	block_t lblkstart = blkstart;
-	int err;
+	__issue_discard_cmd(sbi, false);
+	__wait_discard_cmd(sbi, false);
+}
 
-	trace_f2fs_issue_discard(bdev, blkstart, blklen);
+static int issue_discard_thread(void *data)
+{
+	struct f2fs_sb_info *sbi = data;
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	wait_queue_head_t *q = &dcc->discard_wait_queue;
+repeat:
+	if (kthread_should_stop())
+		return 0;
 
-	if (sbi->s_ndevs) {
-		int devi = f2fs_target_device_index(sbi, blkstart);
+	__issue_discard_cmd(sbi, true);
+	__wait_discard_cmd(sbi, true);
 
-		blkstart -= FDEV(devi).start_blk;
-	}
-	err = __blkdev_issue_discard(bdev,
-				SECTOR_FROM_BLOCK(blkstart),
-				SECTOR_FROM_BLOCK(blklen),
-				GFP_NOFS, 0, &bio);
-	if (!err && bio) {
-		bio->bi_end_io = f2fs_submit_discard_endio;
-		bio->bi_opf |= REQ_SYNC;
+	congestion_wait(BLK_RW_SYNC, HZ/50);
 
-		__add_discard_cmd(sbi, bio, lblkstart, blklen);
-		wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
-	}
-	return err;
+	wait_event_interruptible(*q, kthread_should_stop() ||
+				atomic_read(&dcc->discard_cmd_cnt));
+	goto repeat;
 }
 
 #ifdef CONFIG_BLK_DEV_ZONED
@@ -796,6 +1079,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
 		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
 	sector_t sector, nr_sects;
+	block_t lblkstart = blkstart;
 	int devi = 0;
 
 	if (sbi->s_ndevs) {
@@ -813,7 +1097,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
 	case BLK_ZONE_TYPE_CONVENTIONAL:
 		if (!blk_queue_discard(bdev_get_queue(bdev)))
 			return 0;
-		return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
+		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
 	case BLK_ZONE_TYPE_SEQWRITE_PREF:
 		sector = SECTOR_FROM_BLOCK(blkstart);
@@ -845,7 +1129,7 @@ static int __issue_discard_async(struct f2fs_sb_info *sbi,
 			bdev_zoned_model(bdev) != BLK_ZONED_NONE)
 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
 #endif
-	return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
+	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
 }
 
 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
@@ -888,32 +1172,6 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
 	return err;
 }
 
-static void __add_discard_entry(struct f2fs_sb_info *sbi,
-		struct cp_control *cpc, struct seg_entry *se,
-		unsigned int start, unsigned int end)
-{
-	struct list_head *head = &SM_I(sbi)->dcc_info->discard_entry_list;
-	struct discard_entry *new, *last;
-
-	if (!list_empty(head)) {
-		last = list_last_entry(head, struct discard_entry, list);
-		if (START_BLOCK(sbi, cpc->trim_start) + start ==
-				last->blkaddr + last->len &&
-				last->len < MAX_DISCARD_BLOCKS(sbi)) {
-			last->len += end - start;
-			goto done;
-		}
-	}
-
-	new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
-	INIT_LIST_HEAD(&new->list);
-	new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
-	new->len = end - start;
-	list_add_tail(&new->list, head);
-done:
-	SM_I(sbi)->dcc_info->nr_discards += end - start;
-}
-
 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
 							bool check_only)
 {
@@ -925,7 +1183,9 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
 	unsigned long *discard_map = (unsigned long *)se->discard_map;
 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
 	unsigned int start = 0, end = -1;
-	bool force = (cpc->reason == CP_DISCARD);
+	bool force = (cpc->reason & CP_DISCARD);
+	struct discard_entry *de = NULL;
+	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
 	int i;
 
 	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
@@ -957,14 +1217,24 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
 		if (check_only)
 			return true;
 
-		__add_discard_entry(sbi, cpc, se, start, end);
+		if (!de) {
+			de = f2fs_kmem_cache_alloc(discard_entry_slab,
+								GFP_F2FS_ZERO);
+			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
+			list_add_tail(&de->list, head);
+		}
+
+		for (i = start; i < end; i++)
+			__set_bit_le(i, (void *)de->discard_map);
+
+		SM_I(sbi)->dcc_info->nr_discards += end - start;
 	}
 	return false;
 }
 
 void release_discard_addrs(struct f2fs_sb_info *sbi)
 {
-	struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
+	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
 	struct discard_entry *entry, *this;
 
 	/* drop caches */
@@ -990,13 +1260,13 @@ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
 
 void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
-	struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
+	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
 	struct discard_entry *entry, *this;
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
 	unsigned int start = 0, end = -1;
 	unsigned int secno, start_segno;
-	bool force = (cpc->reason == CP_DISCARD);
+	bool force = (cpc->reason & CP_DISCARD);
 
 	mutex_lock(&dirty_i->seglist_lock);
 
@@ -1026,10 +1296,10 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 			continue;
 		}
next:
-		secno = GET_SECNO(sbi, start);
-		start_segno = secno * sbi->segs_per_sec;
+		secno = GET_SEC_FROM_SEG(sbi, start);
+		start_segno = GET_SEG_FROM_SEC(sbi, secno);
 		if (!IS_CURSEC(sbi, secno) &&
-			!get_valid_blocks(sbi, start, sbi->segs_per_sec))
+			!get_valid_blocks(sbi, start, true))
 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
 				sbi->segs_per_sec << sbi->log_blocks_per_seg);
 
@@ -1043,22 +1313,46 @@ next:
 
 	/* send small discards */
 	list_for_each_entry_safe(entry, this, head, list) {
-		if (force && entry->len < cpc->trim_minlen)
-			goto skip;
-		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
-		cpc->trimmed += entry->len;
+		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
+		bool is_valid = test_bit_le(0, entry->discard_map);
+
+find_next:
+		if (is_valid) {
+			next_pos = find_next_zero_bit_le(entry->discard_map,
+					sbi->blocks_per_seg, cur_pos);
+			len = next_pos - cur_pos;
+
+			if (force && len < cpc->trim_minlen)
+				goto skip;
+
+			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
+									len);
+			cpc->trimmed += len;
+			total_len += len;
+		} else {
+			next_pos = find_next_bit_le(entry->discard_map,
+					sbi->blocks_per_seg, cur_pos);
+		}
skip:
+		cur_pos = next_pos;
+		is_valid = !is_valid;
+
+		if (cur_pos < sbi->blocks_per_seg)
+			goto find_next;
+
 		list_del(&entry->list);
-		SM_I(sbi)->dcc_info->nr_discards -= entry->len;
+		SM_I(sbi)->dcc_info->nr_discards -= total_len;
 		kmem_cache_free(discard_entry_slab, entry);
 	}
+
+	wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
 }
 
 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
 {
 	dev_t dev = sbi->sb->s_bdev->bd_dev;
 	struct discard_cmd_control *dcc;
-	int err = 0;
+	int err = 0, i;
 
 	if (SM_I(sbi)->dcc_info) {
 		dcc = SM_I(sbi)->dcc_info;
@@ -1069,12 +1363,18 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
 	if (!dcc)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&dcc->discard_entry_list);
-	INIT_LIST_HEAD(&dcc->discard_cmd_list);
+	INIT_LIST_HEAD(&dcc->entry_list);
+	for (i = 0; i < MAX_PLIST_NUM; i++)
+		INIT_LIST_HEAD(&dcc->pend_list[i]);
+	INIT_LIST_HEAD(&dcc->wait_list);
 	mutex_init(&dcc->cmd_lock);
-	atomic_set(&dcc->submit_discard, 0);
+	atomic_set(&dcc->issued_discard, 0);
+	atomic_set(&dcc->issing_discard, 0);
+	atomic_set(&dcc->discard_cmd_cnt, 0);
 	dcc->nr_discards = 0;
-	dcc->max_discards = 0;
+	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
+	dcc->undiscard_blks = 0;
+	dcc->root = RB_ROOT;
 
 	init_waitqueue_head(&dcc->discard_wait_queue);
 	SM_I(sbi)->dcc_info = dcc;
@@ -1091,20 +1391,22 @@ init_thread:
 	return err;
 }
 
-static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi, bool free)
+static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
 {
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 
-	if (dcc && dcc->f2fs_issue_discard) {
+	if (!dcc)
+		return;
+
+	if (dcc->f2fs_issue_discard) {
 		struct task_struct *discard_thread = dcc->f2fs_issue_discard;
 
 		dcc->f2fs_issue_discard = NULL;
 		kthread_stop(discard_thread);
 	}
-	if (free) {
-		kfree(dcc);
-		SM_I(sbi)->dcc_info = NULL;
-	}
+
+	kfree(dcc);
+	SM_I(sbi)->dcc_info = NULL;
 }
 
 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
@@ -1345,6 +1647,17 @@ static void write_current_sum_page(struct f2fs_sb_info *sbi,
 	f2fs_put_page(page, 1);
 }
 
+static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
+{
+	struct curseg_info *curseg = CURSEG_I(sbi, type);
+	unsigned int segno = curseg->segno + 1;
+	struct free_segmap_info *free_i = FREE_I(sbi);
+
+	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
+		return !test_bit(segno, free_i->free_segmap);
+	return 0;
+}
+
 /*
  * Find a new segment from the free segments bitmap to right order
  * This function should be returned with success, otherwise BUG
@@ -1355,8 +1668,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
 	struct free_segmap_info *free_i = FREE_I(sbi);
 	unsigned int segno, secno, zoneno;
 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
-	unsigned int hint = *newseg / sbi->segs_per_sec;
-	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
+	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
+	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
 	unsigned int left_start = hint;
 	bool init = true;
 	int go_left = 0;
@@ -1366,8 +1679,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
 
 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
 		segno = find_next_zero_bit(free_i->free_segmap,
-			(hint + 1) * sbi->segs_per_sec, *newseg + 1);
-		if (segno < (hint + 1) * sbi->segs_per_sec)
+			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
+		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
 			goto got_it;
 	}
find_other_zone:
@@ -1398,8 +1711,8 @@ find_other_zone:
 		secno = left_start;
skip_left:
 	hint = secno;
-	segno = secno * sbi->segs_per_sec;
-	zoneno = secno / sbi->secs_per_zone;
+	segno = GET_SEG_FROM_SEC(sbi, secno);
+	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
 
 	/* give up on finding another zone */
 	if (!init)
@@ -1443,7 +1756,7 @@ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
 	struct summary_footer *sum_footer;
 
 	curseg->segno = curseg->next_segno;
-	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
+	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
 	curseg->next_blkoff = 0;
 	curseg->next_segno = NULL_SEGNO;
 
@@ -1456,6 +1769,20 @@ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
 	__set_sit_entry_type(sbi, type, curseg->segno, modified);
 }
 
+static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
+{
+	/* if segs_per_sec is larger than 1, we need to keep original policy. */
+	if (sbi->segs_per_sec != 1)
+		return CURSEG_I(sbi, type)->segno;
+
+	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
+		return 0;
+
+	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
+		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
+	return CURSEG_I(sbi, type)->segno;
+}
+
 /*
  * Allocate a current working segment.
  * This function always allocates a free segment in LFS manner.
@@ -1474,6 +1801,7 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
 	if (test_opt(sbi, NOHEAP))
 		dir = ALLOC_RIGHT;
 
+	segno = __get_next_segno(sbi, type);
 	get_new_segment(sbi, &segno, new_sec, dir);
 	curseg->next_segno = segno;
 	reset_curseg(sbi, type, 1);
@@ -1549,12 +1877,15 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
+	unsigned segno = NULL_SEGNO;
 	int i, cnt;
 	bool reversed = false;
 
 	/* need_SSR() already forces to do this */
-	if (v_ops->get_victim(sbi, &(curseg)->next_segno, BG_GC, type, SSR))
+	if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
+		curseg->next_segno = segno;
 		return 1;
+	}
 
 	/* For node segments, let's do SSR more intensively */
 	if (IS_NODESEG(type)) {
@@ -1578,9 +1909,10 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
 	for (; cnt-- > 0; reversed ? i-- : i++) {
 		if (i == type)
 			continue;
-		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
-						BG_GC, i, SSR))
+		if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
+			curseg->next_segno = segno;
 			return 1;
+		}
 	}
 	return 0;
 }
@@ -1592,17 +1924,21 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
 						int type, bool force)
 {
+	struct curseg_info *curseg = CURSEG_I(sbi, type);
+
 	if (force)
 		new_curseg(sbi, type, true);
 	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
 					type == CURSEG_WARM_NODE)
 		new_curseg(sbi, type, false);
+	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
+		new_curseg(sbi, type, false);
 	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
 		change_curseg(sbi, type, true);
 	else
 		new_curseg(sbi, type, false);
 
-	stat_inc_seg_type(sbi, CURSEG_I(sbi, type));
+	stat_inc_seg_type(sbi, curseg);
 }
 
 void allocate_new_segments(struct f2fs_sb_info *sbi)
@@ -1734,18 +2070,16 @@ static int __get_segment_type_6(struct page *page, enum page_type p_type)
 	if (p_type == DATA) {
 		struct inode *inode = page->mapping->host;
 
-		if (S_ISDIR(inode->i_mode))
-			return CURSEG_HOT_DATA;
-		else if (is_cold_data(page) || file_is_cold(inode))
+		if (is_cold_data(page) || file_is_cold(inode))
 			return CURSEG_COLD_DATA;
-		else
-			return CURSEG_WARM_DATA;
+		if (is_inode_flag_set(inode, FI_HOT_DATA))
+			return CURSEG_HOT_DATA;
+		return CURSEG_WARM_DATA;
 	} else {
 		if (IS_DNODE(page))
 			return is_cold_node(page) ? CURSEG_WARM_NODE :
 						CURSEG_HOT_NODE;
-		else
-			return CURSEG_COLD_NODE;
+		return CURSEG_COLD_NODE;
 	}
 }
 
@@ -1788,15 +2122,14 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 
 	stat_inc_block_count(sbi, curseg);
 
+	if (!__has_curseg_space(sbi, type))
+		sit_i->s_ops->allocate_segment(sbi, type, false);
 	/*
-	 * SIT information should be updated before segment allocation,
-	 * since SSR needs latest valid block information.
+	 * SIT information should be updated after segment allocation,
+	 * since we need to keep dirty segments precisely under SSR.
 	 */
 	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
 
-	if (!__has_curseg_space(sbi, type))
-		sit_i->s_ops->allocate_segment(sbi, type, false);
-
 	mutex_unlock(&sit_i->sentry_lock);
 
 	if (page && IS_NODESEG(type))
@@ -1868,11 +2201,11 @@ void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
 }
 
-void rewrite_data_page(struct f2fs_io_info *fio)
+int rewrite_data_page(struct f2fs_io_info *fio)
 {
 	fio->new_blkaddr = fio->old_blkaddr;
 	stat_inc_inplace_blocks(fio->sbi);
-	f2fs_submit_page_mbio(fio);
+	return f2fs_submit_page_bio(fio);
 }
 
 void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -2437,7 +2770,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 		se = get_seg_entry(sbi, segno);
 
 		/* add discard candidates */
-		if (cpc->reason != CP_DISCARD) {
+		if (!(cpc->reason & CP_DISCARD)) {
 			cpc->trim_start = segno;
 			add_discard_addrs(sbi, cpc, false);
 		}
@@ -2473,7 +2806,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	f2fs_bug_on(sbi, !list_empty(head));
 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
 out:
-	if (cpc->reason == CP_DISCARD) {
+	if (cpc->reason & CP_DISCARD) {
 		__u64 trim_start = cpc->trim_start;
 
 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
@@ -2672,10 +3005,17 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
 
 		/* build discard map only one time */
 		if (f2fs_discard_en(sbi)) {
-			memcpy(se->discard_map, se->cur_valid_map,
-					SIT_VBLOCK_MAP_SIZE);
-			sbi->discard_blks += sbi->blocks_per_seg -
-					se->valid_blocks;
+			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+				memset(se->discard_map, 0xff,
+					SIT_VBLOCK_MAP_SIZE);
+			} else {
+				memcpy(se->discard_map,
+					se->cur_valid_map,
+					SIT_VBLOCK_MAP_SIZE);
+				sbi->discard_blks +=
+					sbi->blocks_per_seg -
+					se->valid_blocks;
+			}
 		}
 
 		if (sbi->segs_per_sec > 1)
@@ -2699,10 +3039,15 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
 			seg_info_from_raw_sit(se, &sit);
 
 			if (f2fs_discard_en(sbi)) {
-				memcpy(se->discard_map, se->cur_valid_map,
-							SIT_VBLOCK_MAP_SIZE);
-				sbi->discard_blks += old_valid_blocks -
-							se->valid_blocks;
+				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+					memset(se->discard_map, 0xff,
+							SIT_VBLOCK_MAP_SIZE);
+				} else {
+					memcpy(se->discard_map, se->cur_valid_map,
+							SIT_VBLOCK_MAP_SIZE);
+					sbi->discard_blks += old_valid_blocks -
+							se->valid_blocks;
+				}
 			}
 
 			if (sbi->segs_per_sec > 1)
@@ -2746,7 +3091,7 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
 		if (segno >= MAIN_SEGS(sbi))
 			break;
 		offset = segno + 1;
-		valid_blocks = get_valid_blocks(sbi, segno, 0);
+		valid_blocks = get_valid_blocks(sbi, segno, false);
 		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
 			continue;
 		if (valid_blocks > sbi->blocks_per_seg) {
@@ -2852,6 +3197,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
 	sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
+	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
 
 	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
 
@@ -2988,7 +3334,7 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi)
 	if (!sm_info)
 		return;
 	destroy_flush_cmd_control(sbi, true);
-	destroy_discard_cmd_control(sbi, true);
+	destroy_discard_cmd_control(sbi);
 	destroy_dirty_segmap(sbi);
 	destroy_curseg(sbi);
 	destroy_free_segmap(sbi);