/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
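
/*
 * Mark the pageblock containing @page MIGRATE_ISOLATE if the block contains
 * no unmovable pages (beyond those accounted for by the memory isolation
 * notifier), and move its free pages to the MIGRATE_ISOLATE freelist.
 * Returns 0 on success, -EBUSY otherwise. Takes zone->lock internally.
 */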
static int set_migratetype_isolate(struct page *page,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * the pages reported as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier
	 * chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: memory hotplug doesn't call shrink_slab() by itself,
	 * so we only check MOVABLE pages here.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * Here, "immobile" means pages that are not on the LRU. If there
	 * are more immobile pages than removable-by-driver pages reported
	 * by the notifier, we fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
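
/*
 * For illustration only (not part of the original file): a minimal sketch
 * of a balloon-style MEM_ISOLATE_COUNT handler that feeds arg.pages_found
 * above. my_balloon_owns_pfn() is a hypothetical driver helper;
 * register_memory_isolate_notifier() is the real registration hook.
 */
#if 0
static int example_isolate_cb(struct notifier_block *self,
			      unsigned long action, void *data)
{
	struct memory_isolate_notify *arg = data;
	unsigned long pfn;

	if (action != MEM_ISOLATE_COUNT)
		return NOTIFY_OK;

	/* Report how many pages in the range the balloon driver holds. */
	for (pfn = arg->start_pfn; pfn < arg->start_pfn + arg->nr_pages; pfn++)
		if (my_balloon_owns_pfn(pfn))	/* hypothetical */
			arg->pages_found++;

	return NOTIFY_OK;
}

static struct notifier_block example_isolate_nb = {
	.notifier_call = example_isolate_cb,
};

/* At driver init: register_memory_isolate_notifier(&example_isolate_nb); */
#endif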

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with an order above pageblock_order on an
	 * isolated pageblock is restricted from merging due to the
	 * freepage counting problem, an unmerged free buddy page may be
	 * left over. move_freepages_block() does not handle merging, so
	 * we need another approach: isolating such a page and freeing it
	 * again causes the buddies to be merged.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
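			/*
			 * __find_buddy_pfn(pfn, order) is simply
			 * pfn ^ (1 << order): e.g. for pfn 0x1000 and
			 * order 10, the buddy sits at
			 * pfn 0x1000 ^ 0x400 = 0x1400.
			 */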
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page with an order above pageblock_order,
	 * there should be no other free page in the range, so we can skip
	 * the costly pageblock scan for moving free pages.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}
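
/*
 * Return the first online page in [pfn, pfn + nr_pages), or NULL if the
 * whole range is invalid or offline.
 */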
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		if (!pfn_valid_within(pfn + i))
			continue;
		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/*
 * start_isolate_page_range() -- set the migratetype of a range of
 * pageblocks to MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migratetype to restore on error recovery.
 *
 * Setting the page-allocation type to MIGRATE_ISOLATE means that free
 * pages in the range will never be allocated: any page that is free now,
 * or freed later, will not be handed out again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success, or -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
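
/*
 * For illustration only (not part of the original file): the typical
 * isolate -> migrate -> test -> undo sequence built on the functions in
 * this file, modelled on alloc_contig_range() in mm/page_alloc.c. The
 * migration step is elided and pfns are assumed pageblock-aligned;
 * example_claim_range() is hypothetical.
 */
#if 0
static int example_claim_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, false);
	if (ret)
		return ret;

	/* ... migrate in-use pages out of [start_pfn, end_pfn) here ... */

	if (test_pages_isolated(start_pfn, end_pfn, false)) {
		/* Something still holds pages in the range; back out. */
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
		return -EBUSY;
	}

	/* The range is now free and isolated; the caller owns the pages. */
	return 0;
}
#endif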

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages. Hence we
	 * check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
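
/*
 * new_page_t callback for migrate_pages(): allocates a destination page,
 * preferably on the local node, for a page being migrated off an isolated
 * range (used, e.g., when alloc_contig_range() drains a range).
 */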
struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}