@@ -896,31 +896,31 @@ static wait_queue_head_t congestion_wqh[2] = {
 		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
 		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
 	};
-static atomic_t nr_bdi_congested[2];
+static atomic_t nr_wb_congested[2];
 
-void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
+void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
-	enum wb_state bit;
 	wait_queue_head_t *wqh = &congestion_wqh[sync];
+	enum wb_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
-	if (test_and_clear_bit(bit, &bdi->wb.congested->state))
-		atomic_dec(&nr_bdi_congested[sync]);
+	if (test_and_clear_bit(bit, &congested->state))
+		atomic_dec(&nr_wb_congested[sync]);
 	smp_mb__after_atomic();
 	if (waitqueue_active(wqh))
 		wake_up(wqh);
 }
-EXPORT_SYMBOL(clear_bdi_congested);
+EXPORT_SYMBOL(clear_wb_congested);
 
-void set_bdi_congested(struct backing_dev_info *bdi, int sync)
+void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
 	enum wb_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
-	if (!test_and_set_bit(bit, &bdi->wb.congested->state))
-		atomic_inc(&nr_bdi_congested[sync]);
+	if (!test_and_set_bit(bit, &congested->state))
+		atomic_inc(&nr_wb_congested[sync]);
 }
-EXPORT_SYMBOL(set_bdi_congested);
+EXPORT_SYMBOL(set_wb_congested);
 
 /**
  * congestion_wait - wait for a backing_dev to become uncongested
@@ -979,7 +979,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
 	 * encountered in the current zone, yield if necessary instead
 	 * of sleeping on the congestion queue
 	 */
-	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
+	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
 	    !test_bit(ZONE_CONGESTED, &zone->flags)) {
 		cond_resched();
 
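
Note: callers that still only hold a struct backing_dev_info could be kept
working with thin wrappers along the following lines -- a sketch, not part of
the hunks above. It assumes bdi->wb.congested points at the bdi's embedded
bdi_writeback_congested, as the removed &bdi->wb.congested->state dereference
implies:

	/* illustrative compatibility wrappers, not part of this patch */
	static inline void clear_bdi_congested(struct backing_dev_info *bdi,
					       int sync)
	{
		clear_wb_congested(bdi->wb.congested, sync);
	}

	static inline void set_bdi_congested(struct backing_dev_info *bdi,
					     int sync)
	{
		set_wb_congested(bdi->wb.congested, sync);
	}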