@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * fs/f2fs/super.c
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/module.h>
 #include <linux/init.h>
@@ -53,9 +50,10 @@ char *f2fs_fault_name[FAULT_MAX] = {
 	[FAULT_DIR_DEPTH]	= "too big dir depth",
 	[FAULT_EVICT_INODE]	= "evict_inode fail",
 	[FAULT_TRUNCATE]	= "truncate fail",
-	[FAULT_IO]		= "IO error",
+	[FAULT_READ_IO]		= "read IO error",
 	[FAULT_CHECKPOINT]	= "checkpoint error",
 	[FAULT_DISCARD]		= "discard error",
+	[FAULT_WRITE_IO]	= "write IO error",
 };
 
 void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
@@ -138,6 +136,7 @@ enum {
 	Opt_alloc,
 	Opt_fsync,
 	Opt_test_dummy_encryption,
+	Opt_checkpoint,
 	Opt_err,
 };
 
@@ -196,6 +195,7 @@ static match_table_t f2fs_tokens = {
 	{Opt_alloc, "alloc_mode=%s"},
 	{Opt_fsync, "fsync_mode=%s"},
 	{Opt_test_dummy_encryption, "test_dummy_encryption"},
+	{Opt_checkpoint, "checkpoint=%s"},
 	{Opt_err, NULL},
 };
 
@@ -207,7 +207,7 @@ void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
 	va_start(args, fmt);
 	vaf.fmt = fmt;
 	vaf.va = &args;
-	printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
+	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
 	va_end(args);
 }
 
@@ -360,7 +360,6 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
 static int parse_options(struct super_block *sb, char *options)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
-	struct request_queue *q;
 	substring_t args[MAX_OPT_ARGS];
 	char *p, *name;
 	int arg = 0;
@@ -415,14 +414,7 @@ static int parse_options(struct super_block *sb, char *options)
 				return -EINVAL;
 			break;
 		case Opt_discard:
-			q = bdev_get_queue(sb->s_bdev);
-			if (blk_queue_discard(q)) {
-				set_opt(sbi, DISCARD);
-			} else if (!f2fs_sb_has_blkzoned(sb)) {
-				f2fs_msg(sb, KERN_WARNING,
-					"mounting with \"discard\" option, but "
-					"the device does not support discard");
-			}
+			set_opt(sbi, DISCARD);
 			break;
 		case Opt_nodiscard:
 			if (f2fs_sb_has_blkzoned(sb)) {
@@ -602,28 +594,31 @@ static int parse_options(struct super_block *sb, char *options)
 			}
 			F2FS_OPTION(sbi).write_io_size_bits = arg;
 			break;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
 		case Opt_fault_injection:
 			if (args->from && match_int(args, &arg))
 				return -EINVAL;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
 			set_opt(sbi, FAULT_INJECTION);
-#else
-			f2fs_msg(sb, KERN_INFO,
-				"FAULT_INJECTION was not selected");
-#endif
 			break;
+
 		case Opt_fault_type:
 			if (args->from && match_int(args, &arg))
 				return -EINVAL;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 			f2fs_build_fault_attr(sbi, 0, arg);
 			set_opt(sbi, FAULT_INJECTION);
+			break;
 #else
+		case Opt_fault_injection:
 			f2fs_msg(sb, KERN_INFO,
-				"FAULT_INJECTION was not selected");
-#endif
+				"fault_injection options not supported");
+			break;
+
+		case Opt_fault_type:
+			f2fs_msg(sb, KERN_INFO,
+				"fault_type options not supported");
 			break;
+#endif
 		case Opt_lazytime:
 			sb->s_flags |= SB_LAZYTIME;
 			break;
@@ -776,6 +771,23 @@ static int parse_options(struct super_block *sb, char *options)
 				"Test dummy encryption mount option ignored");
 #endif
 			break;
+		case Opt_checkpoint:
+			name = match_strdup(&args[0]);
+			if (!name)
+				return -ENOMEM;
+
+			if (strlen(name) == 6 &&
+					!strncmp(name, "enable", 6)) {
+				clear_opt(sbi, DISABLE_CHECKPOINT);
+			} else if (strlen(name) == 7 &&
+					!strncmp(name, "disable", 7)) {
+				set_opt(sbi, DISABLE_CHECKPOINT);
+			} else {
+				kfree(name);
+				return -EINVAL;
+			}
+			kfree(name);
+			break;
 		default:
 			f2fs_msg(sb, KERN_ERR,
 				"Unrecognized mount option \"%s\" or missing value",
@@ -834,6 +846,12 @@ static int parse_options(struct super_block *sb, char *options)
 		}
 	}
 
+	if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
+		f2fs_msg(sb, KERN_ERR,
+				"LFS not compatible with checkpoint=disable\n");
+		return -EINVAL;
+	}
+
 	/* Not pass down write hints if the number of active logs is lesser
 	 * than NR_CURSEG_TYPE.
 	 */
@@ -1021,8 +1039,8 @@ static void f2fs_put_super(struct super_block *sb)
 	 * But, the previous checkpoint was not done by umount, it needs to do
 	 * clean checkpoint again.
 	 */
-	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
-			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
 		struct cp_control cpc = {
 			.reason = CP_UMOUNT,
 		};
@@ -1032,7 +1050,8 @@ static void f2fs_put_super(struct super_block *sb)
 	/* be sure to wait for any on-going discard commands */
 	dropped = f2fs_wait_discard_bios(sbi);
 
-	if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
+	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
+					!sbi->discard_blks && !dropped) {
 		struct cp_control cpc = {
 			.reason = CP_UMOUNT | CP_TRIMMED,
 		};
@@ -1093,6 +1112,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 
 	if (unlikely(f2fs_cp_error(sbi)))
 		return 0;
+	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+		return 0;
 
 	trace_f2fs_sync_fs(sb, sync);
 
@@ -1192,6 +1213,11 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_blocks = total_count - start_count;
 	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
 						sbi->current_reserved_blocks;
+	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
+		buf->f_bfree = 0;
+	else
+		buf->f_bfree -= sbi->unusable_block_count;
+
 	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
 		buf->f_bavail = buf->f_bfree -
 				F2FS_OPTION(sbi).root_reserved_blocks;
@@ -1336,7 +1362,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 				from_kgid_munged(&init_user_ns,
 					F2FS_OPTION(sbi).s_resgid));
 	if (F2FS_IO_SIZE_BITS(sbi))
-		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
+		seq_printf(seq, ",io_bits=%u",
+				F2FS_OPTION(sbi).write_io_size_bits);
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (test_opt(sbi, FAULT_INJECTION)) {
 		seq_printf(seq, ",fault_injection=%u",
@@ -1370,6 +1397,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
 		seq_printf(seq, ",alloc_mode=%s", "reuse");
 
+	if (test_opt(sbi, DISABLE_CHECKPOINT))
+		seq_puts(seq, ",checkpoint=disable");
+
 	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
 		seq_printf(seq, ",fsync_mode=%s", "posix");
 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
@@ -1397,10 +1427,10 @@ static void default_options(struct f2fs_sb_info *sbi)
 	set_opt(sbi, INLINE_DENTRY);
 	set_opt(sbi, EXTENT_CACHE);
 	set_opt(sbi, NOHEAP);
+	clear_opt(sbi, DISABLE_CHECKPOINT);
 	sbi->sb->s_flags |= SB_LAZYTIME;
 	set_opt(sbi, FLUSH_MERGE);
-	if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
-		set_opt(sbi, DISCARD);
+	set_opt(sbi, DISCARD);
 	if (f2fs_sb_has_blkzoned(sbi->sb))
 		set_opt_mode(sbi, F2FS_MOUNT_LFS);
 	else
@@ -1419,6 +1449,57 @@ static void default_options(struct f2fs_sb_info *sbi)
 #ifdef CONFIG_QUOTA
 static int f2fs_enable_quotas(struct super_block *sb);
 #endif
+
+static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
+{
+	struct cp_control cpc;
+	int err;
+
+	sbi->sb->s_flags |= SB_ACTIVE;
+
+	mutex_lock(&sbi->gc_mutex);
+	f2fs_update_time(sbi, DISABLE_TIME);
+
+	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
+		if (err == -ENODATA)
+			break;
+		if (err && err != -EAGAIN) {
+			mutex_unlock(&sbi->gc_mutex);
+			return err;
+		}
+	}
+	mutex_unlock(&sbi->gc_mutex);
+
+	err = sync_filesystem(sbi->sb);
+	if (err)
+		return err;
+
+	if (f2fs_disable_cp_again(sbi))
+		return -EAGAIN;
+
+	mutex_lock(&sbi->gc_mutex);
+	cpc.reason = CP_PAUSE;
+	set_sbi_flag(sbi, SBI_CP_DISABLED);
+	f2fs_write_checkpoint(sbi, &cpc);
+
+	sbi->unusable_block_count = 0;
+	mutex_unlock(&sbi->gc_mutex);
+	return 0;
+}
+
+static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+{
+	mutex_lock(&sbi->gc_mutex);
+	f2fs_dirty_to_prefree(sbi);
+
+	clear_sbi_flag(sbi, SBI_CP_DISABLED);
+	set_sbi_flag(sbi, SBI_IS_DIRTY);
+	mutex_unlock(&sbi->gc_mutex);
+
+	f2fs_sync_fs(sbi->sb, 1);
+}
+
 static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -1428,6 +1509,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 	bool need_restart_gc = false;
 	bool need_stop_gc = false;
 	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
+	bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
+	bool checkpoint_changed;
 #ifdef CONFIG_QUOTA
 	int i, j;
 #endif
@@ -1472,6 +1555,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 	err = parse_options(sb, data);
 	if (err)
 		goto restore_opts;
+	checkpoint_changed =
+			disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);
 
 	/*
 	 * Previous and new state of filesystem is RO,
@@ -1485,7 +1570,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 		err = dquot_suspend(sb, -1);
 		if (err < 0)
 			goto restore_opts;
-	} else if (f2fs_readonly(sb) && !(*flags & MS_RDONLY)) {
+	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
 		/* dquot_resume needs RW */
 		sb->s_flags &= ~SB_RDONLY;
 		if (sb_any_quota_suspended(sb)) {
@@ -1505,6 +1590,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 		goto restore_opts;
 	}
 
+	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
+		err = -EINVAL;
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"disabling checkpoint not compatible with read-only");
+		goto restore_opts;
+	}
+
 	/*
 	 * We stop the GC thread if FS is mounted as RO
 	 * or if background_gc = off is passed in mount
@@ -1533,6 +1625,16 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 		clear_sbi_flag(sbi, SBI_IS_CLOSE);
 	}
 
+	if (checkpoint_changed) {
+		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+			err = f2fs_disable_checkpoint(sbi);
+			if (err)
+				goto restore_gc;
+		} else {
+			f2fs_enable_checkpoint(sbi);
+		}
+	}
+
 	/*
 	 * We stop issue flush thread if FS is mounted as RO
 	 * or if flush_merge is not passed in mount option.
@@ -1556,6 +1658,7 @@ skip:
 		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
 
 	limit_reserve_root(sbi);
+	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
 	return 0;
 restore_gc:
 	if (need_restart_gc) {
@@ -1608,6 +1711,7 @@ repeat:
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
 			goto repeat;
 		}
+		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
 		return PTR_ERR(page);
 	}
 
@@ -1619,6 +1723,7 @@ repeat:
 	}
 	if (unlikely(!PageUptodate(page))) {
 		f2fs_put_page(page, 1);
+		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
 		return -EIO;
 	}
 
@@ -1660,6 +1765,7 @@ retry:
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
 			goto retry;
 		}
+		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
 		break;
 	}
 
@@ -1696,6 +1802,12 @@ static qsize_t *f2fs_get_reserved_space(struct inode *inode)
 
 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
 {
+	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"quota sysfile may be corrupted, skip loading it");
+		return 0;
+	}
+
 	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
 					F2FS_OPTION(sbi).s_jquota_fmt, type);
 }
@@ -1766,7 +1878,14 @@ static int f2fs_enable_quotas(struct super_block *sb)
 		test_opt(F2FS_SB(sb), PRJQUOTA),
 	};
 
-	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
+	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
+		f2fs_msg(sb, KERN_ERR,
+			"quota file may be corrupted, skip loading it");
+		return 0;
+	}
+
+	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+
 	for (type = 0; type < MAXQUOTAS; type++) {
 		qf_inum = f2fs_qf_ino(sb, type);
 		if (qf_inum) {
@@ -1780,6 +1899,8 @@ static int f2fs_enable_quotas(struct super_block *sb)
 					"fsck to fix.", type, err);
 				for (type--; type >= 0; type--)
 					dquot_quota_off(sb, type);
+				set_sbi_flag(F2FS_SB(sb),
+						SBI_QUOTA_NEED_REPAIR);
 				return err;
 			}
 		}
@@ -1787,35 +1908,51 @@ static int f2fs_enable_quotas(struct super_block *sb)
 	return 0;
 }
 
-static int f2fs_quota_sync(struct super_block *sb, int type)
+int f2fs_quota_sync(struct super_block *sb, int type)
 {
+	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct quota_info *dqopt = sb_dqopt(sb);
 	int cnt;
 	int ret;
 
 	ret = dquot_writeback_dquots(sb, type);
 	if (ret)
-		return ret;
+		goto out;
 
 	/*
 	 * Now when everything is written we can discard the pagecache so
 	 * that userspace sees the changes.
 	 */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+		struct address_space *mapping;
+
 		if (type != -1 && cnt != type)
 			continue;
 		if (!sb_has_quota_active(sb, cnt))
 			continue;
 
-		ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
+		mapping = dqopt->files[cnt]->i_mapping;
+
+		ret = filemap_fdatawrite(mapping);
+		if (ret)
+			goto out;
+
+		/* if we are using journalled quota */
+		if (is_journalled_quota(sbi))
+			continue;
+
+		ret = filemap_fdatawait(mapping);
 		if (ret)
-			return ret;
+			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
 
 		inode_lock(dqopt->files[cnt]);
 		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
 		inode_unlock(dqopt->files[cnt]);
 	}
-	return 0;
+out:
+	if (ret)
+		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+	return ret;
 }
 
 static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
@@ -1836,8 +1973,7 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
 
 	inode_lock(inode);
 	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
-	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
-			S_NOATIME | S_IMMUTABLE);
+	f2fs_set_inode_flags(inode);
 	inode_unlock(inode);
 	f2fs_mark_inode_dirty_sync(inode, false);
 
@@ -1852,7 +1988,9 @@ static int f2fs_quota_off(struct super_block *sb, int type)
 	if (!inode || !igrab(inode))
 		return dquot_quota_off(sb, type);
 
-	f2fs_quota_sync(sb, type);
+	err = f2fs_quota_sync(sb, type);
+	if (err)
+		goto out_put;
 
 	err = dquot_quota_off(sb, type);
 	if (err || f2fs_sb_has_quota_ino(sb))
@@ -1860,7 +1998,7 @@ static int f2fs_quota_off(struct super_block *sb, int type)
 
 	inode_lock(inode);
 	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
-	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
+	f2fs_set_inode_flags(inode);
 	inode_unlock(inode);
 	f2fs_mark_inode_dirty_sync(inode, false);
 out_put:
@@ -1871,9 +2009,88 @@ out_put:
 void f2fs_quota_off_umount(struct super_block *sb)
 {
 	int type;
+	int err;
+
+	for (type = 0; type < MAXQUOTAS; type++) {
+		err = f2fs_quota_off(sb, type);
+		if (err) {
+			int ret = dquot_quota_off(sb, type);
+
+			f2fs_msg(sb, KERN_ERR,
+				"Fail to turn off disk quota "
+				"(type: %d, err: %d, ret:%d), Please "
+				"run fsck to fix it.", type, err, ret);
+			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+		}
+	}
+}
+
+static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
+{
+	struct quota_info *dqopt = sb_dqopt(sb);
+	int type;
+
+	for (type = 0; type < MAXQUOTAS; type++) {
+		if (!dqopt->files[type])
+			continue;
+		f2fs_inode_synced(dqopt->files[type]);
+	}
+}
+
+static int f2fs_dquot_commit(struct dquot *dquot)
+{
+	int ret;
+
+	ret = dquot_commit(dquot);
+	if (ret < 0)
+		set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+	return ret;
+}
+
+static int f2fs_dquot_acquire(struct dquot *dquot)
+{
+	int ret;
+
+	ret = dquot_acquire(dquot);
+	if (ret < 0)
+		set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+
+	return ret;
+}
+
+static int f2fs_dquot_release(struct dquot *dquot)
+{
+	int ret;
+
+	ret = dquot_release(dquot);
+	if (ret < 0)
+		set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+	return ret;
+}
+
+static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
+{
+	struct super_block *sb = dquot->dq_sb;
+	struct f2fs_sb_info *sbi = F2FS_SB(sb);
+	int ret;
+
+	ret = dquot_mark_dquot_dirty(dquot);
+
+	/* if we are using journalled quota */
+	if (is_journalled_quota(sbi))
+		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
+
+	return ret;
+}
 
-	for (type = 0; type < MAXQUOTAS; type++)
-		f2fs_quota_off(sb, type);
+static int f2fs_dquot_commit_info(struct super_block *sb, int type)
+{
+	int ret;
+
+	ret = dquot_commit_info(sb, type);
+	if (ret < 0)
+		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+	return ret;
 }
 
 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
@@ -1884,11 +2101,11 @@ static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
 
 static const struct dquot_operations f2fs_quota_operations = {
 	.get_reserved_space = f2fs_get_reserved_space,
-	.write_dquot	= dquot_commit,
-	.acquire_dquot	= dquot_acquire,
-	.release_dquot	= dquot_release,
-	.mark_dirty	= dquot_mark_dquot_dirty,
-	.write_info	= dquot_commit_info,
+	.write_dquot	= f2fs_dquot_commit,
+	.acquire_dquot	= f2fs_dquot_acquire,
+	.release_dquot	= f2fs_dquot_release,
+	.mark_dirty	= f2fs_dquot_mark_dquot_dirty,
+	.write_info	= f2fs_dquot_commit_info,
 	.alloc_dquot	= dquot_alloc,
 	.destroy_dquot	= dquot_destroy,
 	.get_projid	= f2fs_get_projid,
@@ -1906,6 +2123,11 @@ static const struct quotactl_ops f2fs_quotactl_ops = {
 	.get_nextdqblk	= dquot_get_next_dqblk,
 };
 #else
+int f2fs_quota_sync(struct super_block *sb, int type)
+{
+	return 0;
+}
+
 void f2fs_quota_off_umount(struct super_block *sb)
 {
 }
@@ -2170,6 +2392,26 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 					(bh->b_data + F2FS_SUPER_OFFSET);
 	struct super_block *sb = sbi->sb;
 	unsigned int blocksize;
+	size_t crc_offset = 0;
+	__u32 crc = 0;
+
+	/* Check checksum_offset and crc in superblock */
+	if (le32_to_cpu(raw_super->feature) & F2FS_FEATURE_SB_CHKSUM) {
+		crc_offset = le32_to_cpu(raw_super->checksum_offset);
+		if (crc_offset !=
+			offsetof(struct f2fs_super_block, crc)) {
+			f2fs_msg(sb, KERN_INFO,
+				"Invalid SB checksum offset: %zu",
+				crc_offset);
+			return 1;
+		}
+		crc = le32_to_cpu(raw_super->crc);
+		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
+			f2fs_msg(sb, KERN_INFO,
+				"Invalid SB checksum value: %u", crc);
+			return 1;
+		}
+	}
 
 	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
 		f2fs_msg(sb, KERN_INFO,
@@ -2320,7 +2562,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 	unsigned int segment_count_main;
 	unsigned int cp_pack_start_sum, cp_payload;
 	block_t user_block_count;
-	int i;
+	int i, j;
 
 	total = le32_to_cpu(raw_super->segment_count);
 	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -2361,11 +2603,43 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
 			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
 			return 1;
+		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
+			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
+				le32_to_cpu(ckpt->cur_node_segno[j])) {
+				f2fs_msg(sbi->sb, KERN_ERR,
+					"Node segment (%u, %u) has the same "
+					"segno: %u", i, j,
+					le32_to_cpu(ckpt->cur_node_segno[i]));
+				return 1;
+			}
+		}
 	}
 	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
 		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
 			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
 			return 1;
+		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
+			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
+				le32_to_cpu(ckpt->cur_data_segno[j])) {
+				f2fs_msg(sbi->sb, KERN_ERR,
+					"Data segment (%u, %u) has the same "
+					"segno: %u", i, j,
+					le32_to_cpu(ckpt->cur_data_segno[i]));
+				return 1;
+			}
+		}
+	}
+	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
+		for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
+			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
+				le32_to_cpu(ckpt->cur_data_segno[j])) {
+				f2fs_msg(sbi->sb, KERN_ERR,
+					"Data segment (%u) and Data segment (%u)"
+					" has the same segno: %u", i, j,
+					le32_to_cpu(ckpt->cur_node_segno[i]));
+				return 1;
+			}
+		}
 	}
 
 	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
@@ -2423,6 +2697,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 	sbi->dir_level = DEF_DIR_LEVEL;
 	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
 	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
+	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
+	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
+	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
 	clear_sbi_flag(sbi, SBI_NEED_FSCK);
 
 	for (i = 0; i < NR_COUNT_TYPE; i++)
@@ -2453,8 +2730,12 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
 	if (err)
 		return err;
 
-	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
+	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
 								GFP_KERNEL);
+	if (err)
+		percpu_counter_destroy(&sbi->alloc_valid_block_count);
+
+	return err;
 }
 
 #ifdef CONFIG_BLK_DEV_ZONED
@@ -2589,6 +2870,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
 {
 	struct buffer_head *bh;
+	__u32 crc = 0;
 	int err;
 
 	if ((recover && f2fs_readonly(sbi->sb)) ||
@@ -2597,6 +2879,13 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
 		return -EROFS;
 	}
 
+	/* we should update superblock crc here */
+	if (!recover && f2fs_sb_has_sb_chksum(sbi->sb)) {
+		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
+				offsetof(struct f2fs_super_block, crc));
+		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
+	}
+
 	/* write back-up superblock first */
 	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
 	if (!bh)
@@ -2866,7 +3155,7 @@ try_onemore:
 					       GFP_KERNEL);
 		if (!sbi->write_io[i]) {
 			err = -ENOMEM;
-			goto free_options;
+			goto free_bio_info;
 		}
 
 		for (j = HOT; j < n; j++) {
@@ -2909,6 +3198,9 @@ try_onemore:
 		goto free_meta_inode;
 	}
 
+	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
+		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+
 	/* Initialize device list */
 	err = f2fs_scan_devices(sbi);
 	if (err) {
@@ -3007,11 +3299,9 @@ try_onemore:
 	/* Enable quota usage during mount */
 	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
 		err = f2fs_enable_quotas(sb);
-		if (err) {
+		if (err)
 			f2fs_msg(sb, KERN_ERR,
 				"Cannot turn on quotas: error %d", err);
-			goto free_sysfs;
-		}
 	}
 #endif
 	/* if there are nt orphan nodes free them */
@@ -3019,6 +3309,9 @@ try_onemore:
 	if (err)
 		goto free_meta;
 
+	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
+		goto skip_recovery;
+
 	/* recover fsynced data */
 	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
 		/*
@@ -3058,6 +3351,14 @@ skip_recovery:
 	/* f2fs_recover_fsync_data() cleared this already */
 	clear_sbi_flag(sbi, SBI_POR_DOING);
 
+	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+		err = f2fs_disable_checkpoint(sbi);
+		if (err)
+			goto free_meta;
+	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
+		f2fs_enable_checkpoint(sbi);
+	}
+
 	/*
 	 * If filesystem is not mounted as read-only then
 	 * do start the gc_thread.
@@ -3090,10 +3391,10 @@ skip_recovery:
 
 free_meta:
 #ifdef CONFIG_QUOTA
+	f2fs_truncate_quota_inode_pages(sb);
 	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
 		f2fs_quota_off_umount(sbi->sb);
 #endif
-	f2fs_sync_inode_meta(sbi);
 	/*
 	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
 	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
@@ -3101,9 +3402,6 @@ free_meta:
 	 * falls into an infinite loop in f2fs_sync_meta_pages().
 	 */
 	truncate_inode_pages_final(META_MAPPING(sbi));
-#ifdef CONFIG_QUOTA
-free_sysfs:
-#endif
 	f2fs_unregister_sysfs(sbi);
free_root_inode:
 	dput(sb->s_root);
@@ -3175,6 +3473,9 @@ static void kill_f2fs_super(struct super_block *sb)
 			};
 			f2fs_write_checkpoint(sbi, &cpc);
 		}
+
+		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
+			sb->s_flags &= ~SB_RDONLY;
 	}
 	kill_block_super(sb);
 }
|