@@ -184,33 +184,6 @@ out_unlock:
 	spin_unlock_bh(&wb->work_lock);
 }
 
-static void __wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
-				 bool range_cyclic, enum wb_reason reason)
-{
-	struct wb_writeback_work *work;
-
-	if (!wb_has_dirty_io(wb))
-		return;
-
-	/*
-	 * This is WB_SYNC_NONE writeback, so if allocation fails just
-	 * wakeup the thread for old dirty data writeback
-	 */
-	work = kzalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		trace_writeback_nowork(wb->bdi);
-		wb_wakeup(wb);
-		return;
-	}
-
-	work->sync_mode	= WB_SYNC_NONE;
-	work->nr_pages	= nr_pages;
-	work->range_cyclic = range_cyclic;
-	work->reason	= reason;
-
-	wb_queue_work(wb, work);
-}
-
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 /**
@@ -240,22 +213,31 @@ EXPORT_SYMBOL_GPL(inode_congested);
 
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
-/**
- * bdi_start_writeback - start writeback
- * @bdi: the backing device to write from
- * @nr_pages: the number of pages to write
- * @reason: reason why some writeback work was initiated
- *
- * Description:
- *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
- *   started when this function returns, we make no guarantees on
- *   completion. Caller need not hold sb s_umount semaphore.
- *
- */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-			enum wb_reason reason)
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+			bool range_cyclic, enum wb_reason reason)
 {
-	__wb_start_writeback(&bdi->wb, nr_pages, true, reason);
+	struct wb_writeback_work *work;
+
+	if (!wb_has_dirty_io(wb))
+		return;
+
+	/*
+	 * This is WB_SYNC_NONE writeback, so if allocation fails just
+	 * wakeup the thread for old dirty data writeback
+	 */
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		trace_writeback_nowork(wb->bdi);
+		wb_wakeup(wb);
+		return;
+	}
+
+	work->sync_mode	= WB_SYNC_NONE;
+	work->nr_pages	= nr_pages;
+	work->range_cyclic = range_cyclic;
+	work->reason	= reason;
+
+	wb_queue_work(wb, work);
 }
 
 /**
@@ -1219,7 +1201,7 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
-		__wb_start_writeback(&bdi->wb, nr_pages, false, reason);
+		wb_start_writeback(&bdi->wb, nr_pages, false, reason);
 	rcu_read_unlock();
 }
 
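
A minimal before/after sketch of a caller conversion (illustrative only, not
part of the patch). The removed bdi_start_writeback() wrapper hard-coded
range_cyclic to true and forwarded to the bdi's embedded root wb, so an
equivalent call to the now-public wb_start_writeback() passes both explicitly;
WB_REASON_LAPTOP_TIMER is just an example enum wb_reason value here.

	/* Before: the wrapper chose range_cyclic = true internally. */
	bdi_start_writeback(bdi, nr_pages, WB_REASON_LAPTOP_TIMER);

	/* After: call the wb-level helper on the bdi's embedded root wb. */
	wb_start_writeback(&bdi->wb, nr_pages, true, WB_REASON_LAPTOP_TIMER);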