|
@@ -78,6 +78,70 @@ extern void compaction_defer_reset(struct zone *zone, int order,
|
|
|
bool alloc_success);
|
|
|
extern bool compaction_restarting(struct zone *zone, int order);
|
|
|
|
|
|
+/* Compaction has made some progress and retrying makes sense */
|
|
|
+static inline bool compaction_made_progress(enum compact_result result)
|
|
|
+{
|
|
|
+ /*
|
|
|
+ * Even though this might sound confusing, this in fact tells us
|
|
|
+ * that the compaction successfully isolated and migrated some
|
|
|
+ * pageblocks.
|
|
|
+ */
|
|
|
+ if (result == COMPACT_PARTIAL)
|
|
|
+ return true;
|
|
|
+
|
|
|
+ return false;
|
|
|
+}
|
|
|
+
|
|
|
+/* Compaction has failed and it doesn't make much sense to keep retrying. */
|
|
|
+static inline bool compaction_failed(enum compact_result result)
|
|
|
+{
|
|
|
+ /* All zones were scanned completely and still no result. */
|
|
|
+ if (result == COMPACT_COMPLETE)
|
|
|
+ return true;
|
|
|
+
|
|
|
+ return false;
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Compaction has backed off for some reason. It might be throttling or
|
|
|
+ * lock contention. Retrying is still worthwhile.
|
|
|
+ */
|
|
|
+static inline bool compaction_withdrawn(enum compact_result result)
|
|
|
+{
|
|
|
+ /*
|
|
|
+ * Compaction backed off due to watermark checks for order-0
|
|
|
+ * so the regular reclaim has to try harder and reclaim something.
|
|
|
+ */
|
|
|
+ if (result == COMPACT_SKIPPED)
|
|
|
+ return true;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If compaction is deferred for high-order allocations, it is
|
|
|
+ * because sync compaction recently failed. If this is the case
|
|
|
+ * and the caller requested a THP allocation, we do not want
|
|
|
+ * to heavily disrupt the system, so we fail the allocation
|
|
|
+ * instead of entering direct reclaim.
|
|
|
+ */
|
|
|
+ if (result == COMPACT_DEFERRED)
|
|
|
+ return true;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If compaction in async mode encounters contention or blocks a higher
|
|
|
+ * priority task, we back off early rather than cause stalls.
|
|
|
+ */
|
|
|
+ if (result == COMPACT_CONTENDED)
|
|
|
+ return true;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Page scanners have met but we haven't scanned full zones so this
|
|
|
+ * is in fact a back off.
|
|
|
+ */
|
|
|
+ if (result == COMPACT_PARTIAL_SKIPPED)
|
|
|
+ return true;
|
|
|
+
|
|
|
+ return false;
|
|
|
+}
|
|
|
+
|
|
|
extern int kcompactd_run(int nid);
|
|
|
extern void kcompactd_stop(int nid);
|
|
|
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
|
|
@@ -114,6 +178,21 @@ static inline bool compaction_deferred(struct zone *zone, int order)
|
|
|
return true;
|
|
|
}
|
|
|
|
|
|
+static inline bool compaction_made_progress(enum compact_result result)
|
|
|
+{
|
|
|
+ return false;
|
|
|
+}
|
|
|
+
|
|
|
+static inline bool compaction_failed(enum compact_result result)
|
|
|
+{
|
|
|
+ return false;
|
|
|
+}
|
|
|
+
|
|
|
+static inline bool compaction_withdrawn(enum compact_result result)
|
|
|
+{
|
|
|
+ return true;
|
|
|
+}
|
|
|
+
|
|
|
static inline int kcompactd_run(int nid)
|
|
|
{
|
|
|
return 0;
|