@@ -189,6 +189,9 @@ static void __wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 {
 	struct wb_writeback_work *work;
 
+	if (!wb_has_dirty_io(wb))
+		return;
+
 	/*
 	 * This is WB_SYNC_NONE writeback, so if allocation fails just
 	 * wakeup the thread for old dirty data writeback
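
For context, the new early return relies on wb_has_dirty_io(). A minimal
sketch of that helper, assuming it is still the plain list-emptiness test
from include/linux/backing-dev.h at this point in the series:

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	/*
	 * Dirty inodes are tracked on three lists; any non-empty
	 * list means there is IO worth issuing.
	 */
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}
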
@@ -1215,11 +1218,8 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 		nr_pages = get_nr_dirty_pages();
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
-		if (!bdi_has_dirty_io(bdi))
-			continue;
+	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 		__wb_start_writeback(&bdi->wb, nr_pages, false, reason);
-	}
 	rcu_read_unlock();
 }
 
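
With the check folded into __wb_start_writeback(), the caller-side
bdi_has_dirty_io() filter above becomes redundant, and the loop body
collapses to a single statement (hence the dropped braces). The two
predicates are equivalent here because, in this one-writeback-domain-per-bdi
era, bdi_has_dirty_io() is assumed to simply forward to the bdi's embedded
wb; a sketch:

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/* one writeback domain per device: defer to its dirty lists */
	return wb_has_dirty_io(&bdi->wb);
}
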
@@ -1512,11 +1512,12 @@ void writeback_inodes_sb_nr(struct super_block *sb,
 		.nr_pages	= nr,
 		.reason		= reason,
 	};
+	struct backing_dev_info *bdi = sb->s_bdi;
 
-	if (sb->s_bdi == &noop_backing_dev_info)
+	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
 		return;
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
-	wb_queue_work(&sb->s_bdi->wb, &work);
+	wb_queue_work(&bdi->wb, &work);
 	wait_for_completion(&done);
 }
 EXPORT_SYMBOL(writeback_inodes_sb_nr);
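
writeback_inodes_sb_nr() thus returns immediately when the bdi is clean,
instead of queueing a no-op work item and sleeping on its completion. For
reference, callers commonly reach it through the writeback_inodes_sb()
convenience wrapper, sketched here under the assumption that it simply
sizes the request from the global dirty counters:

void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	/* write back as many pages as are currently dirty system-wide */
	writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
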
@@ -1594,13 +1595,14 @@ void sync_inodes_sb(struct super_block *sb)
 		.reason		= WB_REASON_SYNC,
 		.for_sync	= 1,
 	};
+	struct backing_dev_info *bdi = sb->s_bdi;
 
 	/* Nothing to do? */
-	if (sb->s_bdi == &noop_backing_dev_info)
+	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
 		return;
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-	wb_queue_work(&sb->s_bdi->wb, &work);
+	wb_queue_work(&bdi->wb, &work);
 	wait_for_completion(&done);
 
 	wait_sb_inodes(sb);
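
The same two-part guard now opens both sb-level entry points. Purely as an
illustration of what it tests (this helper is hypothetical and not part of
the patch), it reads as:

static bool sb_writeback_is_noop(struct super_block *sb)
{
	struct backing_dev_info *bdi = sb->s_bdi;

	/* nothing dirty to push, or a bdi that cannot do writeback */
	return !bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info;
}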