|
@@ -89,11 +89,27 @@ static void flush_old_commits(struct work_struct *work)
|
|
|
sbi = container_of(work, struct reiserfs_sb_info, old_work.work);
|
|
|
s = sbi->s_journal->j_work_sb;
|
|
|
|
|
|
+ /*
|
|
|
+ * We need s_umount for protecting quota writeback. We have to use
|
|
|
+ * trylock as reiserfs_cancel_old_flush() may be waiting for this work
|
|
|
+ * to complete with s_umount held.
|
|
|
+ */
|
|
|
+ if (!down_read_trylock(&s->s_umount)) {
|
|
|
+ /* Requeue work if we are not cancelling it */
|
|
|
+ spin_lock(&sbi->old_work_lock);
|
|
|
+ if (sbi->work_queued == 1)
|
|
|
+ queue_delayed_work(system_long_wq, &sbi->old_work, HZ);
|
|
|
+ spin_unlock(&sbi->old_work_lock);
|
|
|
+ return;
|
|
|
+ }
|
|
|
spin_lock(&sbi->old_work_lock);
|
|
|
- sbi->work_queued = 0;
|
|
|
+ /* Avoid clobbering the cancel state (2) set by reiserfs_cancel_old_flush() */
|
|
|
+ if (sbi->work_queued == 1)
|
|
|
+ sbi->work_queued = 0;
|
|
|
spin_unlock(&sbi->old_work_lock);
|
|
|
|
|
|
reiserfs_sync_fs(s, 1);
|
|
|
+ up_read(&s->s_umount);
|
|
|
}
|
|
|
|
|
|
void reiserfs_schedule_old_flush(struct super_block *s)
|
|
@@ -117,21 +133,22 @@ void reiserfs_schedule_old_flush(struct super_block *s)
|
|
|
spin_unlock(&sbi->old_work_lock);
|
|
|
}
|
|
|
|
|
|
-static void cancel_old_flush(struct super_block *s)
|
|
|
+void reiserfs_cancel_old_flush(struct super_block *s)
|
|
|
{
|
|
|
struct reiserfs_sb_info *sbi = REISERFS_SB(s);
|
|
|
|
|
|
- cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
|
|
|
spin_lock(&sbi->old_work_lock);
|
|
|
- sbi->work_queued = 0;
|
|
|
+ /* Make sure no new flushes will be queued */
|
|
|
+ sbi->work_queued = 2;
|
|
|
spin_unlock(&sbi->old_work_lock);
|
|
|
+ cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
|
|
|
}
|
|
|
|
|
|
static int reiserfs_freeze(struct super_block *s)
|
|
|
{
|
|
|
struct reiserfs_transaction_handle th;
|
|
|
|
|
|
- cancel_old_flush(s);
|
|
|
+ reiserfs_cancel_old_flush(s);
|
|
|
|
|
|
reiserfs_write_lock(s);
|
|
|
if (!(s->s_flags & MS_RDONLY)) {
|
|
@@ -152,7 +169,13 @@ static int reiserfs_freeze(struct super_block *s)
|
|
|
|
|
|
static int reiserfs_unfreeze(struct super_block *s)
|
|
|
{
|
|
|
+ struct reiserfs_sb_info *sbi = REISERFS_SB(s);
|
|
|
+
|
|
|
reiserfs_allow_writes(s);
|
|
|
+ spin_lock(&sbi->old_work_lock);
|
|
|
+ /* Allow old_work to run again */
|
|
|
+ sbi->work_queued = 0;
|
|
|
+ spin_unlock(&sbi->old_work_lock);
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -547,12 +570,28 @@ static void reiserfs_kill_sb(struct super_block *s)
|
|
|
kill_block_super(s);
|
|
|
}
|
|
|
|
|
|
+#ifdef CONFIG_QUOTA
|
|
|
+static int reiserfs_quota_off(struct super_block *sb, int type);
|
|
|
+
|
|
|
+static void reiserfs_quota_off_umount(struct super_block *s)
|
|
|
+{
|
|
|
+ int type;
|
|
|
+
|
|
|
+ for (type = 0; type < REISERFS_MAXQUOTAS; type++)
|
|
|
+ reiserfs_quota_off(s, type);
|
|
|
+}
|
|
|
+#else
|
|
|
+static inline void reiserfs_quota_off_umount(struct super_block *s)
|
|
|
+{
|
|
|
+}
|
|
|
+#endif
|
|
|
+
|
|
|
static void reiserfs_put_super(struct super_block *s)
|
|
|
{
|
|
|
struct reiserfs_transaction_handle th;
|
|
|
th.t_trans_id = 0;
|
|
|
|
|
|
- dquot_disable(s, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
|
|
|
+ reiserfs_quota_off_umount(s);
|
|
|
|
|
|
reiserfs_write_lock(s);
|
|
|
|
|
@@ -817,7 +856,7 @@ static const struct dquot_operations reiserfs_quota_operations = {
|
|
|
|
|
|
static const struct quotactl_ops reiserfs_qctl_operations = {
|
|
|
.quota_on = reiserfs_quota_on,
|
|
|
- .quota_off = dquot_quota_off,
|
|
|
+ .quota_off = reiserfs_quota_off,
|
|
|
.quota_sync = dquot_quota_sync,
|
|
|
.get_state = dquot_get_state,
|
|
|
.set_info = dquot_set_dqinfo,
|
|
@@ -2194,7 +2233,7 @@ error_unlocked:
|
|
|
if (sbi->commit_wq)
|
|
|
destroy_workqueue(sbi->commit_wq);
|
|
|
|
|
|
- cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
|
|
|
+ reiserfs_cancel_old_flush(s);
|
|
|
|
|
|
reiserfs_free_bitmap_cache(s);
|
|
|
if (SB_BUFFER_WITH_SB(s))
|
|
@@ -2405,12 +2444,47 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
|
|
|
goto out;
|
|
|
}
|
|
|
reiserfs_write_unlock(sb);
|
|
|
- return dquot_quota_on(sb, type, format_id, path);
|
|
|
+ err = dquot_quota_on(sb, type, format_id, path);
|
|
|
+ if (!err) {
|
|
|
+ inode_lock(inode);
|
|
|
+ REISERFS_I(inode)->i_attrs |= REISERFS_IMMUTABLE_FL |
|
|
|
+ REISERFS_NOATIME_FL;
|
|
|
+ inode_set_flags(inode, S_IMMUTABLE | S_NOATIME,
|
|
|
+ S_IMMUTABLE | S_NOATIME);
|
|
|
+ inode_unlock(inode);
|
|
|
+ mark_inode_dirty(inode);
|
|
|
+ }
|
|
|
+ return err;
|
|
|
out:
|
|
|
reiserfs_write_unlock(sb);
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
+static int reiserfs_quota_off(struct super_block *sb, int type)
|
|
|
+{
|
|
|
+ int err;
|
|
|
+ struct inode *inode = sb_dqopt(sb)->files[type];
|
|
|
+
|
|
|
+ if (!inode || !igrab(inode))
|
|
|
+ goto out;
|
|
|
+
|
|
|
+ err = dquot_quota_off(sb, type);
|
|
|
+ if (err)
|
|
|
+ goto out_put;
|
|
|
+
|
|
|
+ inode_lock(inode);
|
|
|
+ REISERFS_I(inode)->i_attrs &= ~(REISERFS_IMMUTABLE_FL |
|
|
|
+ REISERFS_NOATIME_FL);
|
|
|
+ inode_set_flags(inode, 0, S_IMMUTABLE | S_NOATIME);
|
|
|
+ inode_unlock(inode);
|
|
|
+ mark_inode_dirty(inode);
|
|
|
+out_put:
|
|
|
+ iput(inode);
|
|
|
+ return err;
|
|
|
+out:
|
|
|
+ return dquot_quota_off(sb, type);
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Read data from quotafile - avoid pagecache and such because we cannot afford
|
|
|
* acquiring the locks... As quota files are never truncated and quota code
|