/* include/linux/migrate.h — page migration declarations and helpers */
  1. #ifndef _LINUX_MIGRATE_H
  2. #define _LINUX_MIGRATE_H
  3. #include <linux/mm.h>
  4. #include <linux/mempolicy.h>
  5. #include <linux/migrate_mode.h>
  6. #include <linux/hugetlb.h>
  7. typedef struct page *new_page_t(struct page *page, unsigned long private,
  8. int **reason);
  9. typedef void free_page_t(struct page *page, unsigned long private);
  10. /*
  11. * Return values from addresss_space_operations.migratepage():
  12. * - negative errno on page migration failure;
  13. * - zero on page migration success;
  14. */
  15. #define MIGRATEPAGE_SUCCESS 0
  16. enum migrate_reason {
  17. MR_COMPACTION,
  18. MR_MEMORY_FAILURE,
  19. MR_MEMORY_HOTPLUG,
  20. MR_SYSCALL, /* also applies to cpusets */
  21. MR_MEMPOLICY_MBIND,
  22. MR_NUMA_MISPLACED,
  23. MR_CMA,
  24. MR_TYPES
  25. };
  26. /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
  27. extern char *migrate_reason_names[MR_TYPES];
  28. static inline struct page *new_page_nodemask(struct page *page,
  29. int preferred_nid, nodemask_t *nodemask)
  30. {
  31. gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
  32. if (PageHuge(page))
  33. return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
  34. nodemask);
  35. if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
  36. gfp_mask |= __GFP_HIGHMEM;
  37. return __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
  38. }
  39. #ifdef CONFIG_MIGRATION
  40. extern void putback_movable_pages(struct list_head *l);
  41. extern int migrate_page(struct address_space *mapping,
  42. struct page *newpage, struct page *page,
  43. enum migrate_mode mode);
  44. extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
  45. unsigned long private, enum migrate_mode mode, int reason);
  46. extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
  47. extern void putback_movable_page(struct page *page);
  48. extern int migrate_prep(void);
  49. extern int migrate_prep_local(void);
  50. extern void migrate_page_copy(struct page *newpage, struct page *page);
  51. extern int migrate_huge_page_move_mapping(struct address_space *mapping,
  52. struct page *newpage, struct page *page);
  53. extern int migrate_page_move_mapping(struct address_space *mapping,
  54. struct page *newpage, struct page *page,
  55. struct buffer_head *head, enum migrate_mode mode,
  56. int extra_count);
  57. #else
  58. static inline void putback_movable_pages(struct list_head *l) {}
  59. static inline int migrate_pages(struct list_head *l, new_page_t new,
  60. free_page_t free, unsigned long private, enum migrate_mode mode,
  61. int reason)
  62. { return -ENOSYS; }
  63. static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
  64. { return -EBUSY; }
  65. static inline int migrate_prep(void) { return -ENOSYS; }
  66. static inline int migrate_prep_local(void) { return -ENOSYS; }
  67. static inline void migrate_page_copy(struct page *newpage,
  68. struct page *page) {}
  69. static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
  70. struct page *newpage, struct page *page)
  71. {
  72. return -ENOSYS;
  73. }
  74. #endif /* CONFIG_MIGRATION */
  75. #ifdef CONFIG_COMPACTION
  76. extern int PageMovable(struct page *page);
  77. extern void __SetPageMovable(struct page *page, struct address_space *mapping);
  78. extern void __ClearPageMovable(struct page *page);
  79. #else
  80. static inline int PageMovable(struct page *page) { return 0; };
  81. static inline void __SetPageMovable(struct page *page,
  82. struct address_space *mapping)
  83. {
  84. }
  85. static inline void __ClearPageMovable(struct page *page)
  86. {
  87. }
  88. #endif
  89. #ifdef CONFIG_NUMA_BALANCING
  90. extern bool pmd_trans_migrating(pmd_t pmd);
  91. extern int migrate_misplaced_page(struct page *page,
  92. struct vm_area_struct *vma, int node);
  93. #else
  94. static inline bool pmd_trans_migrating(pmd_t pmd)
  95. {
  96. return false;
  97. }
  98. static inline int migrate_misplaced_page(struct page *page,
  99. struct vm_area_struct *vma, int node)
  100. {
  101. return -EAGAIN; /* can't migrate now */
  102. }
  103. #endif /* CONFIG_NUMA_BALANCING */
  104. #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
  105. extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
  106. struct vm_area_struct *vma,
  107. pmd_t *pmd, pmd_t entry,
  108. unsigned long address,
  109. struct page *page, int node);
  110. #else
  111. static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
  112. struct vm_area_struct *vma,
  113. pmd_t *pmd, pmd_t entry,
  114. unsigned long address,
  115. struct page *page, int node)
  116. {
  117. return -EAGAIN;
  118. }
  119. #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
  120. #endif /* _LINUX_MIGRATE_H */