balloon.c

/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */
enum bp_state {
        BP_DONE,
        BP_EAGAIN,
        BP_ECANCELED
};

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
        (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
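/*
 * GFP_BALLOON in detail: GFP_HIGHUSER permits highmem pages,
 * __GFP_NOWARN suppresses allocation-failure warnings, __GFP_NORETRY
 * makes the allocator give up quickly rather than retrying hard enough
 * to trigger the OOM killer, and __GFP_NOMEMALLOC keeps balloon
 * allocations out of the emergency memory reserves.
 */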
static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
        clear_highpage(page);
#endif
}
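/*
 * scrub_page() above zeroes a page before it is handed back to the
 * hypervisor, so that its previous contents cannot leak outside the
 * domain; the call compiles away when CONFIG_XEN_SCRUB_PAGES is not
 * set.
 */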
/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
        /* Lowmem is re-populated first, so highmem pages go at list tail. */
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &ballooned_pages);
                balloon_stats.balloon_high++;
        } else {
                list_add(&page->lru, &ballooned_pages);
                balloon_stats.balloon_low++;
        }
}

static void balloon_append(struct page *page)
{
        __balloon_append(page);
        adjust_managed_page_count(page, -1);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool prefer_highmem)
{
        struct page *page;

        if (list_empty(&ballooned_pages))
                return NULL;

        if (prefer_highmem)
                page = list_entry(ballooned_pages.prev, struct page, lru);
        else
                page = list_entry(ballooned_pages.next, struct page, lru);
        list_del(&page->lru);

        if (PageHighMem(page))
                balloon_stats.balloon_high--;
        else
                balloon_stats.balloon_low--;

        adjust_managed_page_count(page, 1);

        return page;
}

static struct page *balloon_next_page(struct page *page)
{
        struct list_head *next = page->lru.next;
        if (next == &ballooned_pages)
                return NULL;
        return list_entry(next, struct page, lru);
}

static enum bp_state update_schedule(enum bp_state state)
{
        if (state == BP_ECANCELED)
                return BP_ECANCELED;

        if (state == BP_DONE) {
                balloon_stats.schedule_delay = 1;
                balloon_stats.retry_count = 1;
                return BP_DONE;
        }

        ++balloon_stats.retry_count;

        if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
            balloon_stats.retry_count > balloon_stats.max_retry_count) {
                balloon_stats.schedule_delay = 1;
                balloon_stats.retry_count = 1;
                return BP_ECANCELED;
        }

        balloon_stats.schedule_delay <<= 1;

        if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
                balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

        return BP_EAGAIN;
}
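/*
 * update_schedule() implements exponential backoff for BP_EAGAIN: with
 * the defaults set in balloon_init() the retry delay doubles from 1 to
 * 2, 4, 8, 16 and finally 32 seconds (max_schedule_delay), and both the
 * delay and retry_count reset to 1 as soon as a pass finishes with
 * BP_DONE.
 */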
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static struct resource *additional_memory_resource(phys_addr_t size)
{
        struct resource *res;
        int ret;

        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return NULL;

        res->name = "System RAM";
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

        ret = allocate_resource(&iomem_resource, res,
                                size, 0, -1,
                                PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
        if (ret < 0) {
                pr_err("Cannot allocate new System RAM resource\n");
                kfree(res);
                return NULL;
        }

        return res;
}

static void release_memory_resource(struct resource *resource)
{
        if (!resource)
                return;

        /*
         * No need to reset region to identity mapped since we now
         * know that no I/O can be in this region
         */
        release_resource(resource);
        kfree(resource);
}

static enum bp_state reserve_additional_memory(long credit)
{
        struct resource *resource;
        int nid, rc;
        unsigned long balloon_hotplug;

        balloon_hotplug = round_up(credit, PAGES_PER_SECTION);

        resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
        if (!resource)
                goto err;

        nid = memory_add_physaddr_to_nid(resource->start);

#ifdef CONFIG_XEN_HAVE_PVMMU
        /*
         * add_memory() will build page tables for the new memory so
         * the p2m must contain invalid entries so the correct
         * non-present PTEs will be written.
         *
         * If a failure occurs, the original (identity) p2m entries
         * are not restored since this region is now known not to
         * conflict with any devices.
         */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                unsigned long pfn, i;

                pfn = PFN_DOWN(resource->start);
                for (i = 0; i < balloon_hotplug; i++) {
                        if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
                                pr_warn("set_phys_to_machine() failed, no memory added\n");
                                goto err;
                        }
                }
        }
#endif

        rc = add_memory_resource(nid, resource);
        if (rc) {
                pr_warn("Cannot add additional memory (%i)\n", rc);
                goto err;
        }

        balloon_stats.total_pages += balloon_hotplug;

        return BP_DONE;
err:
        release_memory_resource(resource);
        return BP_ECANCELED;
}
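/*
 * reserve_additional_memory() rounds the request up to whole memory
 * sections (PAGES_PER_SECTION) because memory can only be hotplugged a
 * section at a time. The hotplugged range still has to be onlined
 * before it is usable; xen_online_page() below then places each onlined
 * page into the balloon, and the memory notifier re-kicks the worker so
 * the pages can be populated on the next pass.
 */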
static void xen_online_page(struct page *page)
{
        __online_page_set_limits(page);

        mutex_lock(&balloon_mutex);

        __balloon_append(page);

        mutex_unlock(&balloon_mutex);
}

static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
        if (val == MEM_ONLINE)
                schedule_delayed_work(&balloon_worker, 0);

        return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
        .notifier_call = xen_memory_notifier,
        .priority = 0
};
#else
static enum bp_state reserve_additional_memory(long credit)
{
        balloon_stats.target_pages = balloon_stats.current_pages;
        return BP_DONE;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

static long current_credit(void)
{
        return balloon_stats.target_pages - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
        return balloon_stats.balloon_low || balloon_stats.balloon_high;
}
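/*
 * A positive credit means the guest is below its target and should
 * grow: either by re-populating ballooned pages (increase_reservation)
 * or, if the balloon is empty, by hotplugging new memory
 * (reserve_additional_memory). A negative credit means the guest is
 * above target and must give pages back to Xen (decrease_reservation).
 */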
static enum bp_state increase_reservation(unsigned long nr_pages)
{
        int rc;
        unsigned long pfn, i;
        struct page *page;
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid        = DOMID_SELF
        };

        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);

        page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
        for (i = 0; i < nr_pages; i++) {
                if (!page) {
                        nr_pages = i;
                        break;
                }
                frame_list[i] = page_to_pfn(page);
                page = balloon_next_page(page);
        }

        set_xen_guest_handle(reservation.extent_start, frame_list);
        reservation.nr_extents = nr_pages;
        rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
        if (rc <= 0)
                return BP_EAGAIN;

        for (i = 0; i < rc; i++) {
                page = balloon_retrieve(false);
                BUG_ON(page == NULL);

                pfn = page_to_pfn(page);

#ifdef CONFIG_XEN_HAVE_PVMMU
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        set_phys_to_machine(pfn, frame_list[i]);

                        /* Link back into the page tables if not highmem. */
                        if (!PageHighMem(page)) {
                                int ret;
                                ret = HYPERVISOR_update_va_mapping(
                                        (unsigned long)__va(pfn << PAGE_SHIFT),
                                        mfn_pte(frame_list[i], PAGE_KERNEL),
                                        0);
                                BUG_ON(ret);
                        }
                }
#endif

                /* Relinquish the page back to the allocator. */
                __free_reserved_page(page);
        }

        balloon_stats.current_pages += rc;

        return BP_DONE;
}
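/*
 * Both increase_reservation() and decrease_reservation() work in
 * batches capped by frame_list, which is sized to fit in a single page:
 * with 4 KiB pages and 64-bit longs that is 512 PFNs per hypercall.
 * Larger target changes are handled over multiple passes of
 * balloon_process().
 */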
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
        enum bp_state state = BP_DONE;
        unsigned long pfn, i;
        struct page *page;
        int ret;
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid        = DOMID_SELF
        };

        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);

        for (i = 0; i < nr_pages; i++) {
                page = alloc_page(gfp);
                if (page == NULL) {
                        nr_pages = i;
                        state = BP_EAGAIN;
                        break;
                }
                scrub_page(page);
                frame_list[i] = page_to_pfn(page);
        }

        /*
         * Ensure that ballooned highmem pages don't have kmaps.
         *
         * Do this before changing the p2m as kmap_flush_unused()
         * reads PTEs to obtain pages (and hence needs the original
         * p2m entry).
         */
        kmap_flush_unused();

        /* Update direct mapping, invalidate P2M, and add to balloon. */
        for (i = 0; i < nr_pages; i++) {
                pfn = frame_list[i];
                frame_list[i] = pfn_to_gfn(pfn);
                page = pfn_to_page(pfn);

#ifdef CONFIG_XEN_HAVE_PVMMU
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        if (!PageHighMem(page)) {
                                ret = HYPERVISOR_update_va_mapping(
                                        (unsigned long)__va(pfn << PAGE_SHIFT),
                                        __pte_ma(0), 0);
                                BUG_ON(ret);
                        }
                        __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                }
#endif

                balloon_append(page);
        }

        flush_tlb_all();

        set_xen_guest_handle(reservation.extent_start, frame_list);
        reservation.nr_extents = nr_pages;
        ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
        BUG_ON(ret != nr_pages);

        balloon_stats.current_pages -= nr_pages;

        return state;
}
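/*
 * Note the ordering in decrease_reservation(): pages are allocated and
 * scrubbed, stale kmaps are flushed, the kernel's direct mapping and
 * the P2M entries are torn down, the TLB is flushed, and only then are
 * the frames handed to Xen via XENMEM_decrease_reservation, so the
 * guest never keeps a live mapping to a frame it no longer owns.
 */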
/*
 * As this is a work item it is guaranteed to run as a single instance only.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
        enum bp_state state = BP_DONE;
        long credit;

        do {
                mutex_lock(&balloon_mutex);

                credit = current_credit();

                if (credit > 0) {
                        if (balloon_is_inflated())
                                state = increase_reservation(credit);
                        else
                                state = reserve_additional_memory(credit);
                }

                if (credit < 0)
                        state = decrease_reservation(-credit, GFP_BALLOON);

                state = update_schedule(state);

                mutex_unlock(&balloon_mutex);

                cond_resched();

        } while (credit && state == BP_DONE);

        /* Schedule more work if there is some still to be done. */
        if (state == BP_EAGAIN)
                schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
}
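/*
 * balloon_process() loops until the target is reached or an error
 * occurs: each pass moves at most one frame_list batch, then drops the
 * mutex and calls cond_resched() so a large target change does not hold
 * the lock or the CPU for too long. On BP_EAGAIN the work item is
 * re-queued after the backoff delay computed by update_schedule().
 */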
/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
        /* No need for lock. Not read-modify-write updates. */
        balloon_stats.target_pages = target;
        schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @highmem: allow highmem pages
 * @return 0 on success, error otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
        int pgno = 0;
        struct page *page;

        mutex_lock(&balloon_mutex);
        while (pgno < nr_pages) {
                page = balloon_retrieve(highmem);
                if (page && (highmem || !PageHighMem(page))) {
                        pages[pgno++] = page;
                } else {
                        enum bp_state st;

                        if (page)
                                balloon_append(page);
                        st = decrease_reservation(nr_pages - pgno,
                                        highmem ? GFP_HIGHUSER : GFP_USER);
                        if (st != BP_DONE)
                                goto out_undo;
                }
        }
        mutex_unlock(&balloon_mutex);
        return 0;
out_undo:
        while (pgno)
                balloon_append(pages[--pgno]);
        /* Free the memory back to the kernel soon */
        schedule_delayed_work(&balloon_worker, 0);
        mutex_unlock(&balloon_mutex);
        return -ENOMEM;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);
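/*
 * Typical use of the two exports above, sketched for illustration only;
 * my_map_foreign_frames() is a made-up helper, not part of this file.
 * A driver that needs guest physical frames without RAM behind them,
 * e.g. to map another domain's grant pages, takes them from the balloon
 * and returns them when finished:
 *
 *        struct page *pages[16];
 *        int rc;
 *
 *        rc = alloc_xenballooned_pages(16, pages, false);
 *        if (rc)
 *                return rc;
 *        rc = my_map_foreign_frames(pages, 16);
 *        ...
 *        free_xenballooned_pages(16, pages);
 */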
/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
        int i;

        mutex_lock(&balloon_mutex);

        for (i = 0; i < nr_pages; i++) {
                if (pages[i])
                        balloon_append(pages[i]);
        }

        /* The balloon may be too large now. Shrink it if needed. */
        if (current_credit())
                schedule_delayed_work(&balloon_worker, 0);

        mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);

static void __init balloon_add_region(unsigned long start_pfn,
                                      unsigned long pages)
{
        unsigned long pfn, extra_pfn_end;
        struct page *page;

        /*
         * If the amount of usable memory has been limited (e.g., with
         * the 'mem' command line parameter), don't add pages beyond
         * this limit.
         */
        extra_pfn_end = min(max_pfn, start_pfn + pages);

        for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
                page = pfn_to_page(pfn);
                /* totalram_pages and totalhigh_pages do not
                   include the boot-time balloon extension, so
                   don't subtract from it. */
                __balloon_append(page);
        }

        balloon_stats.total_pages += extra_pfn_end - start_pfn;
}

static int __init balloon_init(void)
{
        int i;

        if (!xen_domain())
                return -ENODEV;

        pr_info("Initialising balloon driver\n");

        balloon_stats.current_pages = xen_pv_domain()
                ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
                : get_num_physpages();
        balloon_stats.target_pages  = balloon_stats.current_pages;
        balloon_stats.balloon_low   = 0;
        balloon_stats.balloon_high  = 0;
        balloon_stats.total_pages   = balloon_stats.current_pages;

        balloon_stats.schedule_delay = 1;
        balloon_stats.max_schedule_delay = 32;
        balloon_stats.retry_count = 1;
        balloon_stats.max_retry_count = RETRY_UNLIMITED;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
        set_online_page_callback(&xen_online_page);
        register_memory_notifier(&xen_memory_nb);
#endif

        /*
         * Initialize the balloon with pages from the extra memory
         * regions (see arch/x86/xen/setup.c).
         */
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
                if (xen_extra_mem[i].n_pfns)
                        balloon_add_region(xen_extra_mem[i].start_pfn,
                                           xen_extra_mem[i].n_pfns);

        return 0;
}
subsys_initcall(balloon_init);

MODULE_LICENSE("GPL");