// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/memblock.h>

#include <xen/features.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
#include <asm/early_ioremap.h>

#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"

static unsigned long shared_info_pfn;
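
/*
 * Register the page at shared_info_pfn as this domain's shared_info page
 * with the hypervisor via the XENMEM_add_to_physmap hypercall.
 */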
void xen_hvm_init_shared_info(void)
{
	struct xen_add_to_physmap xatp;

	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = shared_info_pfn;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();
}
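
/*
 * Reserve a page of low memory for shared_info and map it with
 * early_memremap() until the kernel's memory mapping is available.
 */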
static void __init reserve_shared_info(void)
{
	u64 pa;

	/*
	 * Search for a free page starting at 4kB physical address.
	 * Low memory is preferred to avoid an EPT large page split up
	 * by the mapping.
	 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
	 * the BIOS used for HVM guests is well behaved and won't
	 * clobber memory other than the first 4kB.
	 */
	for (pa = PAGE_SIZE;
	     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
	     memblock_is_reserved(pa);
	     pa += PAGE_SIZE)
		;

	shared_info_pfn = PHYS_PFN(pa);

	memblock_reserve(pa, PAGE_SIZE);
	HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
}
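
/*
 * Once the memory mapping is set up, switch HYPERVISOR_shared_info from
 * the early mapping to the direct-map address of the reserved PFN.
 */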
static void __init xen_hvm_init_mem_mapping(void)
{
	early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
	HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));

	/*
	 * The virtual address of the shared_info page has changed, so
	 * the vcpu_info pointer for VCPU 0 is now stale.
	 *
	 * The prepare_boot_cpu callback will re-initialize it via
	 * xen_vcpu_setup, but we can't rely on that to be called for
	 * old Xen versions (xen_have_vector_callback == 0).
	 *
	 * It is, in any case, bad to have a stale vcpu_info pointer
	 * so reset it now.
	 */
	xen_vcpu_info_reset(0);
}
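
/*
 * Gather PV information from the Xen CPUID leaves: report the hypervisor
 * version, set up the hypercall page for plain HVM guests, read the
 * feature flags and record the boot CPU's vcpu id.
 */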
static void __init init_hvm_pv_info(void)
{
	int major, minor;
	uint32_t eax, ebx, ecx, edx, base;

	base = xen_cpuid_base();
	eax = cpuid_eax(base + 1);

	major = eax >> 16;
	minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

	xen_domain_type = XEN_HVM_DOMAIN;

	/* PVH sets up the hypercall page in xen_prepare_pvh(). */
	if (xen_pvh_domain())
		pv_info.name = "Xen PVH";
	else {
		u64 pfn;
		uint32_t msr;

		pv_info.name = "Xen HVM";
		msr = cpuid_ebx(base + 2);
		pfn = __pa(hypercall_page);
		wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
	}

	xen_setup_features();

	cpuid(base + 4, &eax, &ebx, &ecx, &edx);
	if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
		this_cpu_write(xen_vcpu_id, ebx);
	else
		this_cpu_write(xen_vcpu_id, smp_processor_id());
}
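
/*
 * kexec shutdown handlers: issue a SHUTDOWN_soft_reset so the hypervisor
 * resets the domain's state before the new kernel takes over.
 */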
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
	native_machine_shutdown();
	if (kexec_in_progress)
		xen_reboot(SHUTDOWN_soft_reset);
}

static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
	native_machine_crash_shutdown(regs);
	xen_reboot(SHUTDOWN_soft_reset);
}
#endif
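
/*
 * CPU hotplug "prepare" callback: (re)initialize the per-CPU Xen state
 * (vcpu id, vcpu_info, timer and SMP interrupts) before a CPU is
 * brought online.
 */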
static int xen_cpu_up_prepare_hvm(unsigned int cpu)
{
	int rc = 0;

	/*
	 * This can happen if CPU was offlined earlier and
	 * offlining timed out in common_cpu_die().
	 */
	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
	}

	if (cpu_acpi_id(cpu) != U32_MAX)
		per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
	else
		per_cpu(xen_vcpu_id, cpu) = cpu;
	rc = xen_vcpu_setup(cpu);
	if (rc)
		return rc;

	if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
		xen_setup_timer(cpu);

	rc = xen_smp_intr_init(cpu);
	if (rc) {
		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
		     cpu, rc);
	}
	return rc;
}
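
/*
 * CPU hotplug teardown counterpart: free the per-CPU SMP interrupts and,
 * where it was set up, the timer.
 */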
static int xen_cpu_dead_hvm(unsigned int cpu)
{
	xen_smp_intr_free(cpu);

	if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
		xen_teardown_timer(cpu);

	return 0;
}
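
/*
 * Main HVM guest setup, registered as the init_platform hook in
 * x86_hyper_xen_hvm below.
 */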
static void __init xen_hvm_guest_init(void)
{
	if (xen_pv_domain())
		return;

	init_hvm_pv_info();

	reserve_shared_info();
	xen_hvm_init_shared_info();

	/*
	 * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; it is used in the event channel upcall and in some pvclock
	 * related functions.
	 */
	xen_vcpu_info_reset(0);

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;

	xen_hvm_smp_init();
	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();

#ifdef CONFIG_KEXEC_CORE
	machine_ops.shutdown = xen_hvm_shutdown;
	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
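
/*
 * "xen_nopv" on the command line suppresses Xen platform detection
 * (see xen_platform_hvm() below), so the guest runs as a plain HVM guest.
 */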
static bool xen_nopv;
static __init int xen_parse_nopv(char *arg)
{
	xen_nopv = true;
	return 0;
}
early_param("xen_nopv", xen_parse_nopv);
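
/*
 * A Xen HVM guest still needs the local APIC unless it can use PV pirqs
 * together with the vector callback for interrupt delivery.
 */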
bool xen_hvm_need_lapic(void)
{
	if (xen_nopv)
		return false;
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
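
/*
 * Hypervisor detection hook: return the Xen CPUID leaf base, or 0 when
 * running as a PV guest or when "xen_nopv" was given.
 */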
static uint32_t __init xen_platform_hvm(void)
{
	if (xen_pv_domain() || xen_nopv)
		return 0;

	return xen_cpuid_base();
}
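
/*
 * Late guest init: detect a PVH domain from the legacy platform flags
 * unless the PVH boot path already set xen_pvh.
 */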
static __init void xen_hvm_guest_late_init(void)
{
#ifdef CONFIG_XEN_PVH
	/* Test for PVH domain (PVH boot path taken overrides ACPI flags). */
	if (!xen_pvh &&
	    (x86_platform.legacy.rtc || !x86_platform.legacy.no_vga))
		return;

	/* PVH detected. */
	xen_pvh = true;

	/* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
	if (!nr_ioapics && acpi_irq_model == ACPI_IRQ_MODEL_PIC)
		acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;

	machine_ops.emergency_restart = xen_emergency_restart;
	pv_info.name = "Xen PVH";
#endif
}
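
/* Hook table registered with the x86 hypervisor detection code. */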
const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = {
	.name			= "Xen HVM",
	.detect			= xen_platform_hvm,
	.type			= X86_HYPER_XEN_HVM,
	.init.init_platform	= xen_hvm_guest_init,
	.init.x2apic_available	= xen_x2apic_para_available,
	.init.init_mem_mapping	= xen_hvm_init_mem_mapping,
	.init.guest_late_init	= xen_hvm_guest_late_init,
	.runtime.pin_vcpu	= xen_pin_vcpu,
};