/* include/linux/migrate.h */
  1. #ifndef _LINUX_MIGRATE_H
  2. #define _LINUX_MIGRATE_H
  3. #include <linux/mm.h>
  4. #include <linux/mempolicy.h>
  5. #include <linux/migrate_mode.h>
  6. #include <linux/hugetlb.h>
  7. typedef struct page *new_page_t(struct page *page, unsigned long private,
  8. int **reason);
  9. typedef void free_page_t(struct page *page, unsigned long private);
  10. /*
  11. * Return values from addresss_space_operations.migratepage():
  12. * - negative errno on page migration failure;
  13. * - zero on page migration success;
  14. */
  15. #define MIGRATEPAGE_SUCCESS 0
  16. enum migrate_reason {
  17. MR_COMPACTION,
  18. MR_MEMORY_FAILURE,
  19. MR_MEMORY_HOTPLUG,
  20. MR_SYSCALL, /* also applies to cpusets */
  21. MR_MEMPOLICY_MBIND,
  22. MR_NUMA_MISPLACED,
  23. MR_CMA,
  24. MR_TYPES
  25. };
  26. /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
  27. extern char *migrate_reason_names[MR_TYPES];
  28. static inline struct page *new_page_nodemask(struct page *page,
  29. int preferred_nid, nodemask_t *nodemask)
  30. {
  31. gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
  32. unsigned int order = 0;
  33. struct page *new_page = NULL;
  34. if (PageHuge(page))
  35. return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
  36. preferred_nid, nodemask);
  37. if (thp_migration_supported() && PageTransHuge(page)) {
  38. order = HPAGE_PMD_ORDER;
  39. gfp_mask |= GFP_TRANSHUGE;
  40. }
  41. if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
  42. gfp_mask |= __GFP_HIGHMEM;
  43. new_page = __alloc_pages_nodemask(gfp_mask, order,
  44. preferred_nid, nodemask);
  45. if (new_page && PageTransHuge(page))
  46. prep_transhuge_page(new_page);
  47. return new_page;
  48. }
  49. #ifdef CONFIG_MIGRATION
  50. extern void putback_movable_pages(struct list_head *l);
  51. extern int migrate_page(struct address_space *mapping,
  52. struct page *newpage, struct page *page,
  53. enum migrate_mode mode);
  54. extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
  55. unsigned long private, enum migrate_mode mode, int reason);
  56. extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
  57. extern void putback_movable_page(struct page *page);
  58. extern int migrate_prep(void);
  59. extern int migrate_prep_local(void);
  60. extern void migrate_page_states(struct page *newpage, struct page *page);
  61. extern void migrate_page_copy(struct page *newpage, struct page *page);
  62. extern int migrate_huge_page_move_mapping(struct address_space *mapping,
  63. struct page *newpage, struct page *page);
  64. extern int migrate_page_move_mapping(struct address_space *mapping,
  65. struct page *newpage, struct page *page,
  66. struct buffer_head *head, enum migrate_mode mode,
  67. int extra_count);
  68. #else
  69. static inline void putback_movable_pages(struct list_head *l) {}
  70. static inline int migrate_pages(struct list_head *l, new_page_t new,
  71. free_page_t free, unsigned long private, enum migrate_mode mode,
  72. int reason)
  73. { return -ENOSYS; }
  74. static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
  75. { return -EBUSY; }
  76. static inline int migrate_prep(void) { return -ENOSYS; }
  77. static inline int migrate_prep_local(void) { return -ENOSYS; }
  78. static inline void migrate_page_states(struct page *newpage, struct page *page)
  79. {
  80. }
  81. static inline void migrate_page_copy(struct page *newpage,
  82. struct page *page) {}
  83. static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
  84. struct page *newpage, struct page *page)
  85. {
  86. return -ENOSYS;
  87. }
  88. #endif /* CONFIG_MIGRATION */
  89. #ifdef CONFIG_COMPACTION
  90. extern int PageMovable(struct page *page);
  91. extern void __SetPageMovable(struct page *page, struct address_space *mapping);
  92. extern void __ClearPageMovable(struct page *page);
  93. #else
  94. static inline int PageMovable(struct page *page) { return 0; };
  95. static inline void __SetPageMovable(struct page *page,
  96. struct address_space *mapping)
  97. {
  98. }
  99. static inline void __ClearPageMovable(struct page *page)
  100. {
  101. }
  102. #endif
  103. #ifdef CONFIG_NUMA_BALANCING
  104. extern bool pmd_trans_migrating(pmd_t pmd);
  105. extern int migrate_misplaced_page(struct page *page,
  106. struct vm_area_struct *vma, int node);
  107. #else
  108. static inline bool pmd_trans_migrating(pmd_t pmd)
  109. {
  110. return false;
  111. }
  112. static inline int migrate_misplaced_page(struct page *page,
  113. struct vm_area_struct *vma, int node)
  114. {
  115. return -EAGAIN; /* can't migrate now */
  116. }
  117. #endif /* CONFIG_NUMA_BALANCING */
  118. #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
  119. extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
  120. struct vm_area_struct *vma,
  121. pmd_t *pmd, pmd_t entry,
  122. unsigned long address,
  123. struct page *page, int node);
  124. #else
  125. static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
  126. struct vm_area_struct *vma,
  127. pmd_t *pmd, pmd_t entry,
  128. unsigned long address,
  129. struct page *page, int node)
  130. {
  131. return -EAGAIN;
  132. }
  133. #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
  134. #endif /* _LINUX_MIGRATE_H */