mem-reservation.c

// SPDX-License-Identifier: GPL-2.0

/******************************************************************************
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <asm/xen/hypercall.h>

#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>

/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
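
/*
 * For example, with 4 KiB pages on both sides, XEN_PFN_PER_PAGE == 1, so
 * fls(1) - 1 == 0 and each extent covers a single frame.  With 64 KiB
 * Linux pages over 4 KiB Xen pages, XEN_PFN_PER_PAGE == 16 and
 * EXTENT_ORDER == 4, i.e. one extent of 2^4 contiguous Xen frames per
 * Linux page.
 */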

#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn;

		BUG_ON(!page);
		pfn = page_to_pfn(page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		set_phys_to_machine(pfn, frames[i]);

		/* Link back into the page tables if not highmem. */
		if (!PageHighMem(page)) {
			int ret;

			ret = HYPERVISOR_update_va_mapping(
					(unsigned long)__va(pfn << PAGE_SHIFT),
					mfn_pte(frames[i], PAGE_KERNEL),
					0);
			BUG_ON(ret);
		}
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);

void __xenmem_reservation_va_mapping_reset(unsigned long count,
					   struct page **pages)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn = page_to_pfn(page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		if (!PageHighMem(page)) {
			int ret;

			ret = HYPERVISOR_update_va_mapping(
					(unsigned long)__va(pfn << PAGE_SHIFT),
					__pte_ma(0), 0);
			BUG_ON(ret);
		}
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
#endif /* CONFIG_XEN_HAVE_PVMMU */

/* @frames is an array of PFNs */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};

	/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
	set_xen_guest_handle(reservation.extent_start, frames);
	reservation.nr_extents = count;
	return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
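
/*
 * A minimal inflate-style caller sketch, modeled loosely on the Xen
 * balloon driver.  my_populate_pages(), its page source and its error
 * handling are hypothetical illustration only; the xenmem_reservation_*
 * wrappers and page_to_xen_pfn() are the real APIs (<xen/page.h> and
 * <linux/slab.h> are assumed in addition to the headers above).  A real
 * caller must also cope with partial success, where the hypercall
 * populates fewer than nr extents.
 */
#if 0 /* illustration, not built */
static int my_populate_pages(struct page **pages, int nr)
{
	xen_pfn_t *frames;
	int i, rc;

	frames = kcalloc(nr, sizeof(*frames), GFP_KERNEL);
	if (!frames)
		return -ENOMEM;

	/* XENMEM_populate_physmap takes PFNs in Xen granularity. */
	for (i = 0; i < nr; i++)
		frames[i] = page_to_xen_pfn(pages[i]);

	/* Returns the number of extents populated, or a negative error. */
	rc = xenmem_reservation_increase(nr, frames);
	if (rc != nr) {
		kfree(frames);
		return rc < 0 ? rc : -ENOMEM;
	}

	/* For PV guests, wire the new frames into the p2m and page tables. */
	xenmem_reservation_va_mapping_update(nr, pages, frames);

	kfree(frames);
	return 0;
}
#endif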

/* @frames is an array of GFNs */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};

	/* XENMEM_decrease_reservation requires a GFN. */
	set_xen_guest_handle(reservation.extent_start, frames);
	reservation.nr_extents = count;
	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
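
/*
 * The matching deflate-style sketch.  As above, my_return_pages() and
 * its error handling are hypothetical; only the xenmem_reservation_*
 * wrappers and xen_page_to_gfn() are real APIs.  Note the ordering: the
 * PV mapping is torn down before the frames are handed back.
 */
#if 0 /* illustration, not built */
static int my_return_pages(struct page **pages, int nr)
{
	xen_pfn_t *frames;
	int i, rc;

	frames = kcalloc(nr, sizeof(*frames), GFP_KERNEL);
	if (!frames)
		return -ENOMEM;

	/* XENMEM_decrease_reservation takes GFNs. */
	for (i = 0; i < nr; i++)
		frames[i] = xen_page_to_gfn(pages[i]);

	/*
	 * For PV guests, clear the kernel mapping and invalidate the p2m
	 * entries before the hypervisor reclaims the frames.
	 */
	xenmem_reservation_va_mapping_reset(nr, pages);

	/* Returns the number of extents released, or a negative error. */
	rc = xenmem_reservation_decrease(nr, frames);

	kfree(frames);
	return rc == nr ? 0 : (rc < 0 ? rc : -EBUSY);
}
#endif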