compaction.h

#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was deferred due to past failures */
#define COMPACT_DEFERRED	0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		1
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	2
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		3
/* The full zone was compacted */
#define COMPACT_COMPLETE	4

/* When adding new state, please change compaction_status_string, too */

/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE	0
/* Either need_sched() was true or fatal signal pending */
#define COMPACT_CONTENDED_SCHED	1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK	2
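
/*
 * Illustrative sketch, not part of the original header: one way a direct
 * compaction caller (e.g. the allocator slow path in mm/page_alloc.c) might
 * combine the status codes above with the *contended signal.  The local
 * variables and labels here are hypothetical, for illustration only:
 *
 *	int contended = COMPACT_CONTENDED_NONE;
 *	unsigned long status;
 *
 *	status = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
 *				      MIGRATE_ASYNC, &contended);
 *	if (status == COMPACT_DEFERRED)
 *		goto nopage;		// backing off after recent failures
 *	if (status == COMPACT_SKIPPED)
 *		goto reclaim;		// not possible yet; try direct reclaim
 *	if (status >= COMPACT_PARTIAL) {
 *		// some pageblocks were compacted: retry the allocation
 *	}
 *	if (contended == COMPACT_CONTENDED_SCHED)
 *		;			// stopped early: need_sched() or fatal signal
 */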
struct alloc_context; /* in mm/internal.h */

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order,
			int alloc_flags, int classzone_idx);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
static inline void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}

/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
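
/*
 * Illustrative sketch, not part of the original header: the intended life
 * cycle of the deferral helpers above, as a hypothetical caller might drive
 * them (the real users live in mm/page_alloc.c and mm/compaction.c).
 * "compaction_succeeded" is a made-up flag used only for illustration:
 *
 *	if (compaction_deferred(zone, order))
 *		return COMPACT_DEFERRED;	// still backing off
 *
 *	// ... attempt to compact the zone ...
 *
 *	if (compaction_succeeded)
 *		compaction_defer_reset(zone, order, true);
 *	else
 *		defer_compaction(zone, order);	// widen the back-off window;
 *						// each failure roughly doubles it,
 *						// capped at 1 << COMPACT_MAX_DEFER_SHIFT
 */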
#else
static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
			unsigned int order, int alloc_flags,
			const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order,
			int alloc_flags, int classzone_idx)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */