@@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			 *    caller can stall after page list has been processed.
 			 *
 			 * 2) Global or new memcg reclaim encounters a page that is
-			 *    not marked for immediate reclaim or the caller does not
-			 *    have __GFP_IO. In this case mark the page for immediate
+			 *    not marked for immediate reclaim, or the caller does not
+			 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
+			 *    not to fs). In this case mark the page for immediate
 			 *    reclaim and continue scanning.
 			 *
-			 *    __GFP_IO is checked because a loop driver thread might
+			 *    Require may_enter_fs because we would wait on fs, which
+			 *    may not have submitted IO yet. And the loop driver might
 			 *    enter reclaim, and deadlock if it waits on a page for
 			 *    which it is needed to do the write (loop masks off
 			 *    __GFP_IO|__GFP_FS for this reason); but more thought
 			 *    would probably show more reasons.
 			 *
-			 *    Don't require __GFP_FS, since we're not going into the
-			 *    FS, just waiting on its writeback completion. Worryingly,
-			 *    ext4 gfs2 and xfs allocate pages with
-			 *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
-			 *    may_enter_fs here is liable to OOM on them.
-			 *
 			 * 3) Legacy memcg encounters a page that is not already marked
 			 *    PageReclaim. memcg does not have any dirty pages
 			 *    throttling so we could easily OOM just because too many
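For illustration, here is a standalone userspace sketch (not kernel code) of the "Case 2" decision this patch changes. The shape of the condition follows the second hunk below, and may_enter_fs is reduced to what the new comment describes (__GFP_FS, or __GFP_IO when the page is simply going to swap rather than to a filesystem); the flag values, struct fields and helper names are simplified stand-ins rather than the kernel's own.

/*
 * Standalone sketch of the "Case 2" test in shrink_page_list() after this
 * patch.  Not kernel code: flag values, struct fields and helpers below are
 * simplified stand-ins.  Case 1 (the kswapd congestion check) and the
 * bookkeeping done in each branch are omitted.
 */
#include <stdbool.h>
#include <stdio.h>

#define GFP_IO	(1u << 0)	/* stand-in for __GFP_IO */
#define GFP_FS	(1u << 1)	/* stand-in for __GFP_FS */

struct page_state {
	bool writeback;		/* PageWriteback(): page is under writeback    */
	bool reclaim;		/* PageReclaim():   already marked for reclaim */
	bool swap_backed;	/* page would go to swap, not to a filesystem  */
};

/*
 * Reduction of the per-page may_enter_fs test, following the new comment:
 * the caller may wait on the filesystem if it has __GFP_FS, or __GFP_IO
 * when the page is simply going to swap.
 */
static bool may_enter_fs(const struct page_state *p, unsigned int gfp_mask)
{
	if (gfp_mask & GFP_FS)
		return true;
	return p->swap_backed && (gfp_mask & GFP_IO);
}

/*
 * Case 2: global (or "new", i.e. cgroup v2, memcg) reclaim, a page not yet
 * marked PageReclaim, or a caller that must not wait on the filesystem.
 * Returns true when the page should be marked for immediate reclaim and
 * scanning should continue; false means legacy memcg reclaim may wait for
 * the writeback to complete (case 3).
 */
static bool case2_mark_and_continue(const struct page_state *p,
				    unsigned int gfp_mask, bool sane_reclaim)
{
	if (!p->writeback)
		return false;	/* the whole block only runs under writeback */
	return sane_reclaim || !p->reclaim || !may_enter_fs(p, gfp_mask);
}

int main(void)
{
	/* Legacy memcg reclaim from a GFP_NOFS-like context (__GFP_IO set,
	 * __GFP_FS clear) hitting a file page already marked PageReclaim. */
	struct page_state file_page = {
		.writeback = true, .reclaim = true, .swap_backed = false,
	};
	printf("NOFS caller, file page: %s\n",
	       case2_mark_and_continue(&file_page, GFP_IO, false) ?
	       "mark for immediate reclaim, continue" : "may wait on writeback");

	/* Same caller, but an anon page headed to swap: __GFP_IO alone is
	 * enough to allow waiting, so case 3 still applies. */
	struct page_state anon_page = {
		.writeback = true, .reclaim = true, .swap_backed = true,
	};
	printf("NOFS caller, anon page: %s\n",
	       case2_mark_and_continue(&anon_page, GFP_IO, false) ?
	       "mark for immediate reclaim, continue" : "may wait on writeback");
	return 0;
}

The first scenario in main() is the situation the patch targets: under the old !(sc->gfp_mask & __GFP_IO) test, a legacy-memcg reclaimer holding __GFP_IO but not __GFP_FS, looking at a file page already marked PageReclaim, would fall through to case 3 and wait for writeback that the filesystem may not even have submitted yet; with may_enter_fs the page is marked for immediate reclaim and scanning continues.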
@@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 			/* Case 2 above */
 			} else if (sane_reclaim(sc) ||
-			    !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+			    !PageReclaim(page) || !may_enter_fs) {
 				/*
 				 * This is slightly racy - end_page_writeback()
 				 * might have just cleared PageReclaim, then