  1. /*
  2. * mm/balloon_compaction.c
  3. *
  4. * Common interface for making balloon pages movable by compaction.
  5. *
  6. * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
  7. */
  8. #include <linux/mm.h>
  9. #include <linux/slab.h>
  10. #include <linux/export.h>
  11. #include <linux/balloon_compaction.h>
  12. /*
  13. * balloon_page_enqueue - allocates a new page and inserts it into the balloon
  14. * page list.
  15. * @b_dev_info: balloon device decriptor where we will insert a new page to
  16. *
  17. * Driver must call it to properly allocate a new enlisted balloon page
  18. * before definetively removing it from the guest system.
  19. * This function returns the page address for the recently enqueued page or
  20. * NULL in the case we fail to allocate a new page this turn.
  21. */
  22. struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
  23. {
  24. unsigned long flags;
  25. struct page *page = alloc_page(balloon_mapping_gfp_mask() |
  26. __GFP_NOMEMALLOC | __GFP_NORETRY);
  27. if (!page)
  28. return NULL;
  29. /*
  30. * Block others from accessing the 'page' when we get around to
  31. * establishing additional references. We should be the only one
  32. * holding a reference to the 'page' at this point.
  33. */
  34. BUG_ON(!trylock_page(page));
  35. spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  36. balloon_page_insert(b_dev_info, page);
  37. __count_vm_event(BALLOON_INFLATE);
  38. spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  39. unlock_page(page);
  40. return page;
  41. }
  42. EXPORT_SYMBOL_GPL(balloon_page_enqueue);
/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call it to properly de-allocate a previously enlisted balloon
 * page before definitively releasing it back to the guest system.
 * This function returns the page address for the recently dequeued page or
 * NULL in the case we find balloon's page list temporarily empty due to
 * compaction isolated pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	struct page *page, *tmp;
	unsigned long flags;
	bool dequeued_page;

	dequeued_page = false;
	/*
	 * NOTE(review): the list is walked without pages_lock held; the
	 * per-page trylock below is what guards each entry we act on.
	 */
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Block others from accessing the 'page' while we get around
		 * establishing additional references and preparing the 'page'
		 * to be released by the balloon driver.
		 */
		if (trylock_page(page)) {
			if (!PagePrivate(page)) {
				/*
				 * Raced with isolation: compaction cleared
				 * PagePrivate (see __isolate_balloon_page),
				 * so skip this page and try the next one.
				 */
				unlock_page(page);
				continue;
			}
			/* pages_lock serializes list/counter updates */
			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
			balloon_page_delete(page);
			__count_vm_event(BALLOON_DEFLATE);
			spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
			unlock_page(page);
			dequeued_page = true;
			break;
		}
	}

	if (!dequeued_page) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there is no isolated pages, then something
		 * went out of track and some balloon pages are lost.
		 * BUG() here, otherwise the balloon driver may get stuck into
		 * an infinite loop while attempting to release all its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
  99. #ifdef CONFIG_BALLOON_COMPACTION
  100. static inline void __isolate_balloon_page(struct page *page)
  101. {
  102. struct balloon_dev_info *b_dev_info = balloon_page_device(page);
  103. unsigned long flags;
  104. spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  105. ClearPagePrivate(page);
  106. list_del(&page->lru);
  107. b_dev_info->isolated_pages++;
  108. spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  109. }
  110. static inline void __putback_balloon_page(struct page *page)
  111. {
  112. struct balloon_dev_info *b_dev_info = balloon_page_device(page);
  113. unsigned long flags;
  114. spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  115. SetPagePrivate(page);
  116. list_add(&page->lru, &b_dev_info->pages);
  117. b_dev_info->isolated_pages--;
  118. spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  119. }
  120. /* __isolate_lru_page() counterpart for a ballooned page */
  121. bool balloon_page_isolate(struct page *page)
  122. {
  123. /*
  124. * Avoid burning cycles with pages that are yet under __free_pages(),
  125. * or just got freed under us.
  126. *
  127. * In case we 'win' a race for a balloon page being freed under us and
  128. * raise its refcount preventing __free_pages() from doing its job
  129. * the put_page() at the end of this block will take care of
  130. * release this page, thus avoiding a nasty leakage.
  131. */
  132. if (likely(get_page_unless_zero(page))) {
  133. /*
  134. * As balloon pages are not isolated from LRU lists, concurrent
  135. * compaction threads can race against page migration functions
  136. * as well as race against the balloon driver releasing a page.
  137. *
  138. * In order to avoid having an already isolated balloon page
  139. * being (wrongly) re-isolated while it is under migration,
  140. * or to avoid attempting to isolate pages being released by
  141. * the balloon driver, lets be sure we have the page lock
  142. * before proceeding with the balloon page isolation steps.
  143. */
  144. if (likely(trylock_page(page))) {
  145. /*
  146. * A ballooned page, by default, has PagePrivate set.
  147. * Prevent concurrent compaction threads from isolating
  148. * an already isolated balloon page by clearing it.
  149. */
  150. if (balloon_page_movable(page)) {
  151. __isolate_balloon_page(page);
  152. unlock_page(page);
  153. return true;
  154. }
  155. unlock_page(page);
  156. }
  157. put_page(page);
  158. }
  159. return false;
  160. }
  161. /* putback_lru_page() counterpart for a ballooned page */
  162. void balloon_page_putback(struct page *page)
  163. {
  164. /*
  165. * 'lock_page()' stabilizes the page and prevents races against
  166. * concurrent isolation threads attempting to re-isolate it.
  167. */
  168. lock_page(page);
  169. if (__is_movable_balloon_page(page)) {
  170. __putback_balloon_page(page);
  171. /* drop the extra ref count taken for page isolation */
  172. put_page(page);
  173. } else {
  174. WARN_ON(1);
  175. dump_page(page, "not movable balloon page");
  176. }
  177. unlock_page(page);
  178. }
  179. /* move_to_new_page() counterpart for a ballooned page */
  180. int balloon_page_migrate(struct page *newpage,
  181. struct page *page, enum migrate_mode mode)
  182. {
  183. struct balloon_dev_info *balloon = balloon_page_device(page);
  184. int rc = -EAGAIN;
  185. /*
  186. * Block others from accessing the 'newpage' when we get around to
  187. * establishing additional references. We should be the only one
  188. * holding a reference to the 'newpage' at this point.
  189. */
  190. BUG_ON(!trylock_page(newpage));
  191. if (WARN_ON(!__is_movable_balloon_page(page))) {
  192. dump_page(page, "not movable balloon page");
  193. unlock_page(newpage);
  194. return rc;
  195. }
  196. if (balloon && balloon->migratepage)
  197. rc = balloon->migratepage(balloon, newpage, page, mode);
  198. unlock_page(newpage);
  199. return rc;
  200. }
  201. #endif /* CONFIG_BALLOON_COMPACTION */