page_isolation.c

/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"

int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or on the LRU, isolation can continue.
         * Later, for example, when the memory hotplug notifier runs,
         * the pages reported as "can be isolated" should be isolated
         * (freed) by the balloon driver through the memory notifier
         * chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found,
                                 skip_hwpoisoned_pages))
                ret = 0;

        /*
         * Here, "immobile" means "not-on-LRU" pages. If there are more
         * immobile pages than removable-by-driver pages reported by the
         * notifier, we'll fail the isolation.
         */

out:
        if (!ret) {
                unsigned long nr_pages;
                int migratetype = get_pageblock_migratetype(page);

                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                zone->nr_isolate_pageblock++;
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

                __mod_zone_freepage_state(zone, -nr_pages, migratetype);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages(zone);
        return ret;
}
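
/*
 * Illustrative sketch (not part of this file): a balloon driver can
 * answer the MEM_ISOLATE_COUNT query above by hooking the memory
 * isolate notifier chain, in the spirit of the pseries CMM driver.
 * balloon_isolate_nb and count_balloon_pages() are hypothetical names.
 *
 *      static int balloon_isolate_notify(struct notifier_block *nb,
 *                                        unsigned long action, void *arg)
 *      {
 *              struct memory_isolate_notify *mn = arg;
 *
 *              if (action == MEM_ISOLATE_COUNT)
 *                      mn->pages_found += count_balloon_pages(mn->start_pfn,
 *                                                             mn->nr_pages);
 *              return NOTIFY_OK;
 *      }
 *      ...
 *      register_memory_isolate_notifier(&balloon_isolate_nb);
 */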

void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;
        struct page *isolated_page = NULL;
        unsigned int order;
        unsigned long page_idx, buddy_idx;
        struct page *buddy;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;

        /*
         * Because a free page with order >= pageblock_order on an isolated
         * pageblock is restricted from merging due to the freepage counting
         * problem, it is possible that there is a free buddy page here.
         * move_freepages_block() doesn't handle merging, so we need another
         * approach: isolating and then freeing the page lets the buddy
         * allocator merge it.
         */
        if (PageBuddy(page)) {
                order = page_order(page);
                if (order >= pageblock_order) {
                        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
                        buddy_idx = __find_buddy_index(page_idx, order);
                        buddy = page + (buddy_idx - page_idx);

                        if (pfn_valid_within(page_to_pfn(buddy)) &&
                            !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                kernel_map_pages(page, (1 << order), 1);
                                set_page_refcounted(page);
                                isolated_page = page;
                        }
                }
        }

        /*
         * If we isolated a free page with order >= pageblock_order, there
         * should be no other free pages in the pageblock, so we can avoid
         * the costly pageblock scan for moving free pages.
         */
        if (!isolated_page) {
                nr_pages = move_freepages_block(zone, page, migratetype);
                __mod_zone_freepage_state(zone, nr_pages, migratetype);
        }
        set_pageblock_migratetype(page, migratetype);
        zone->nr_isolate_pageblock--;
out:
        spin_unlock_irqrestore(&zone->lock, flags);
        if (isolated_page)
                __free_pages(isolated_page, order);
}
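
/*
 * Return the first valid page in [pfn, pfn + nr_pages), or NULL if no
 * PFN in the range is valid. When CONFIG_HOLES_IN_ZONE is not set,
 * pfn_valid_within() is always true, so this simply returns the first
 * page.
 */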
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++)
                if (pfn_valid_within(pfn + i))
                        break;
        if (unlikely(i == nr_pages))
                return NULL;
        return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page-allocation-type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in
 * the future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, bool skip_hwpoisoned_pages)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page &&
                    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
                unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
}
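
/*
 * Usage sketch (illustrative, not part of this file): a typical caller,
 * in the spirit of alloc_contig_range() in mm/page_alloc.c, brackets
 * its migration work with these helpers. migrate_away() stands in for
 * the caller's own migration loop and is a hypothetical name.
 *
 *      ret = start_isolate_page_range(start, end, MIGRATE_MOVABLE, false);
 *      if (ret)
 *              return ret;
 *      migrate_away(start, end);
 *      if (test_pages_isolated(start, end, false)) {
 *              ret = -EBUSY;
 *              goto done;
 *      }
 *      ... claim the now-free, isolated pages ...
 * done:
 *      undo_isolate_page_range(start, end, MIGRATE_MOVABLE);
 *      return ret;
 */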

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  bool skip_hwpoisoned_pages)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        /*
                         * If a race between isolation and allocation
                         * happens, some free pages could be on the
                         * MIGRATE_MOVABLE list even though the
                         * pageblock's migration type is
                         * MIGRATE_ISOLATE. Catch it and move the page
                         * onto the MIGRATE_ISOLATE list.
                         */
                        if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
                                struct page *end_page;

                                end_page = page + (1 << page_order(page)) - 1;
                                move_freepages(page_zone(page), page, end_page,
                                               MIGRATE_ISOLATE);
                        }
                        pfn += 1 << page_order(page);
                } else if (page_count(page) == 0 &&
                           get_freepage_migratetype(page) == MIGRATE_ISOLATE)
                        pfn += 1;
                else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
                        /*
                         * The HWPoisoned page may not be in the buddy
                         * system, and page_count() is not 0.
                         */
                        pfn++;
                        continue;
                } else
                        break;
        }
        if (pfn < end_pfn)
                return 0;
        return 1;
}
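
/*
 * A note on the PageBuddy branch above: moving a stray MIGRATE_MOVABLE
 * buddy chunk onto the MIGRATE_ISOLATE freelist keeps the allocator
 * from handing it out again while the range is still being verified,
 * so the chunk can be treated as isolated by this check.
 */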

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        bool skip_hwpoisoned_pages)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;
        int ret;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so a chunk of free
         * pages is not necessarily aligned to pageblock_nr_pages.
         * Because of that, we just check the migratetype of each
         * pageblock first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check that all pages are either free or marked MIGRATE_ISOLATE */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
                                                skip_hwpoisoned_pages);
        spin_unlock_irqrestore(&zone->lock, flags);
        return ret ? 0 : -EBUSY;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private,
                                  int **resultp)
{
        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

        /*
         * TODO: allocate a destination hugepage from the nearest neighbor
         * node, in accordance with the memory policy of the user process,
         * if possible. For now, as a simple workaround, we use the next
         * node as the destination.
         */
        if (PageHuge(page)) {
                nodemask_t src = nodemask_of_node(page_to_nid(page));
                nodemask_t dst;

                nodes_complement(dst, src);
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                            next_node(page_to_nid(page), dst));
        }

        if (PageHighMem(page))
                gfp_mask |= __GFP_HIGHMEM;

        return alloc_page(gfp_mask);
}
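
/*
 * Usage sketch (illustrative, not part of this file): this function is
 * shaped as a new_page_t callback for migrate_pages(), roughly as
 * do_migrate_range() in mm/memory_hotplug.c uses it:
 *
 *      ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
 *                          MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */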