/*
 * gtests/tests/vmx_tsc_adjust_test.c
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * IA32_TSC_ADJUST test
 *
 * According to the SDM, "if an execution of WRMSR to the
 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
 * the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR".
 *
 * Note that when L1 doesn't intercept writes to IA32_TSC, a
 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
 * value.
 *
 * This test verifies that this unusual case is handled correctly.
 */
#include "test_util.h"
#include "kvm_util.h"
#include "x86.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define PAGE_SIZE 4096
#define VCPU_ID 5

#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE -(1ll << 48)
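
/*
 * In outline, the arithmetic the test checks: a WRMSR(IA32_TSC) that
 * moves the TSC by some delta X moves IA32_TSC_ADJUST by the same X.
 * L1 subtracts TSC_ADJUST_VALUE from its TSC once, so IA32_TSC_ADJUST
 * should end up at or below -1 * TSC_ADJUST_VALUE; L2's write is not
 * intercepted and lands on L1's TSC, subtracting roughly another
 * TSC_ADJUST_VALUE, so the bound then becomes -2 * TSC_ADJUST_VALUE.
 */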

enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};

struct vmx_page {
	vm_vaddr_t virt;
	vm_paddr_t phys;
};

enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,
	NUM_VMX_PAGES,
};

struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object. */
static struct kvm_vm *vm;

/* Array of vmx_page descriptors that is shared with the guest. */
struct vmx_page *vmx_pages;
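
/*
 * Guest-to-host "hypercall": an IN from one of the PORT_* ports above
 * triggers a KVM_EXIT_IO in the host, which then reads the argument
 * out of the guest's RDI (see the switch in main()).
 */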
#define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg))

static void do_exit_to_l0(uint16_t port, unsigned long arg)
{
	__asm__ __volatile__("in %[port], %%al"
			     :
			     : [port]"d"(port), "D"(arg)
			     : "rax");
}

#define GUEST_ASSERT(_condition) do {					\
	if (!(_condition))						\
		exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \
} while (0)

static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t adjust;

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	exit_to_l0(PORT_REPORT, adjust);
	GUEST_ASSERT(adjust <= max);
}
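
/*
 * L2 reconstructs L1's TSC value by undoing TSC_OFFSET_VALUE, then
 * writes IA32_TSC. Because L1 does not intercept the WRMSR, the write
 * goes through to L1's TSC, pulling IA32_TSC_ADJUST down by another
 * TSC_ADJUST_VALUE.
 */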
static void l2_guest_code(void)
{
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}

static void l1_guest_code(struct vmx_page *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;
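
	/*
	 * The vCPU's TSC should still be below TSC_ADJUST_VALUE (1 << 32)
	 * this early in the test, so the WRMSR below genuinely subtracts
	 * from the TSC.
	 */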
	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	prepare_for_vmx_operation();

	/* Enter VMX root operation. */
	*(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision();
	GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys));

	/* Load a VMCS. */
	*(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision();
	GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys));
	GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
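
	/*
	 * With TSC offsetting enabled above, L2 reads the TSC as L1's TSC
	 * plus TSC_OFFSET_VALUE, and the all-zero MSR bitmap means L2's
	 * IA32_TSC accesses are not intercepted by L1.
	 */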

	/* Jump into L2. First, test failure to load guest CR3. */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
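
	/*
	 * This entry should fail its guest-state checks; a failed VM entry
	 * must leave IA32_TSC_ADJUST untouched, so the -1 * TSC_ADJUST_VALUE
	 * bound still holds below.
	 */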
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	exit_to_l0(PORT_DONE, 0);
}
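
/*
 * Allocate one zeroed guest page and record both its guest-virtual and
 * guest-physical address; the VMX instructions (VMXON, VMPTRLD, ...)
 * take physical addresses.
 */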
static void allocate_vmx_page(struct vmx_page *page)
{
	vm_vaddr_t virt;

	virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0);
	memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE);

	page->virt = virt;
	page->phys = addr_gva2gpa(vm, virt);
}

static vm_vaddr_t allocate_vmx_pages(void)
{
	vm_vaddr_t vmx_pages_vaddr;
	int i;

	vmx_pages_vaddr = vm_vaddr_alloc(
		vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0);

	vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr);

	for (i = 0; i < NUM_VMX_PAGES; i++)
		allocate_vmx_page(&vmx_pages[i]);

	return vmx_pages_vaddr;
}

void report(int64_t val)
{
	printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
	       val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_vaddr;
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	if (!(entry->ecx & CPUID_VMX)) {
		fprintf(stderr, "nested VMX not enabled, skipping test\n");
		exit(KSFT_SKIP);
	}

	vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);

	/* Allocate VMX pages and shared descriptors (vmx_pages). */
	vmx_pages_vaddr = allocate_vmx_pages();
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr);

	for (;;) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct kvm_regs regs;

		vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		vcpu_regs_get(vm, VCPU_ID, &regs);

		switch (run->io.port) {
		case PORT_ABORT:
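			/*
			 * regs.rdi holds the guest pointer to the assert
			 * message; reading it directly on the host relies on
			 * the test binary being loaded at the same virtual
			 * addresses in the guest, so the host's own copy of
			 * the string sits at that address too.
			 */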
			TEST_ASSERT(false, "%s", (const char *) regs.rdi);
			/* NOT REACHED */
		case PORT_REPORT:
			report(regs.rdi);
			break;
		case PORT_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
		}
	}

done:
	kvm_vm_free(vm);
	return 0;
}