@@ -1573,20 +1573,18 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * If dirty pages are scanned that are not queued for IO, it
 		 * implies that flushers are not keeping up. In this case, flag
 		 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
-		 * pages from reclaim context. It will forcibly stall in the
-		 * next check.
+		 * pages from reclaim context.
 		 */
 		if (nr_unqueued_dirty == nr_taken)
 			zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
 
 		/*
-		 * In addition, if kswapd scans pages marked marked for
-		 * immediate reclaim and under writeback (nr_immediate), it
-		 * implies that pages are cycling through the LRU faster than
+		 * If kswapd scans pages marked for immediate
+		 * reclaim and under writeback (nr_immediate), it implies
+		 * that pages are cycling through the LRU faster than
 		 * they are written so also forcibly stall.
 		 */
-		if ((nr_unqueued_dirty == nr_taken || nr_immediate) &&
-		    current_may_throttle())
+		if (nr_immediate && current_may_throttle())
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 
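The net effect of this hunk: reclaim no longer stalls merely because every
isolated page was unqueued dirty; only pages coming back around still under
writeback (nr_immediate) trigger the stall. Below is a minimal standalone
sketch of the post-patch checks, not kernel code: set_tail_lru_dirty(),
may_throttle() and stall() are hypothetical stand-ins for
zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY), current_may_throttle() and
congestion_wait(BLK_RW_ASYNC, HZ/10).

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel helpers used in the hunk. */
static void set_tail_lru_dirty(void) { puts("flag zone ZONE_TAIL_LRU_DIRTY"); }
static bool may_throttle(void)       { return true; }
static void stall(void)              { puts("congestion_wait(BLK_RW_ASYNC, HZ/10)"); }

static void writeback_checks(unsigned long nr_unqueued_dirty,
			     unsigned long nr_taken,
			     unsigned long nr_immediate)
{
	/*
	 * Every page taken off the LRU was dirty but not queued for
	 * IO: flushers are not keeping up, so flag the zone and let
	 * kswapd write pages from reclaim context.
	 */
	if (nr_unqueued_dirty == nr_taken)
		set_tail_lru_dirty();

	/*
	 * Pages marked for immediate reclaim while still under
	 * writeback are cycling through the LRU faster than they are
	 * written, so forcibly stall, but only when throttling here
	 * is safe for the caller.
	 */
	if (nr_immediate && may_throttle())
		stall();
}

int main(void)
{
	/* Example: 32 pages isolated, all unqueued dirty, 4 of them
	 * back under writeback -> flags the zone, then stalls. */
	writeback_checks(32, 32, 4);
	return 0;
}

Note the two conditions are now independent: unqueued dirty pages only set
the zone flag for kswapd, while the stall is reserved for the stronger
signal that pages under writeback are being re-encountered.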