/*
 * gtests/tests/vmx_tsc_adjust_test.c
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 *
 * IA32_TSC_ADJUST test
 *
 * According to the SDM, "if an execution of WRMSR to the
 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
 * the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR."
 *
 * Note that when L1 doesn't intercept writes to IA32_TSC, a
 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
 * value.
 *
 * This test verifies that this unusual case is handled correctly.
 */
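
/*
 * Worked example of the arithmetic above, assuming IA32_TSC_ADJUST
 * starts at zero: if RDTSC returns T and the guest then executes
 * WRMSR(IA32_TSC, T - X), the CPU subtracts X from IA32_TSC_ADJUST,
 * leaving it at (about) -X; a second such write leaves it at about -2X.
 * "About", because the cycles that elapse between the RDTSC and the
 * WRMSR push the result slightly below -X, which is why
 * check_ia32_tsc_adjust() below asserts an upper bound rather than
 * equality.
 */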
#include "test_util.h"
#include "kvm_util.h"
#include "x86.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "../kselftest.h"

#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define PAGE_SIZE 4096
#define VCPU_ID 5

#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE -(1ll << 48)
enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};
struct vmx_page {
	vm_vaddr_t virt;
	vm_paddr_t phys;
};

enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,
	NUM_VMX_PAGES,
};

/*
 * struct kvm_msrs ends in a flexible array of entries; packing a single
 * kvm_msr_entry directly behind the header gives a buffer suitable for
 * a one-MSR KVM_SET_MSRS/KVM_GET_MSRS call.
 */
struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));
/* The virtual machine object. */
static struct kvm_vm *vm;

/* Array of vmx_page descriptors that is shared with the guest. */
struct vmx_page *vmx_pages;
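
/*
 * Guest-to-host communication convention used throughout this test: the
 * guest executes an IN instruction, so the port number selects the
 * command (one of the PORT_* values above) and the argument travels in
 * %rdi, where main() reads it back out of the vCPU's saved registers
 * after the KVM_EXIT_IO exit.
 */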
#define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg))

static void do_exit_to_l0(uint16_t port, unsigned long arg)
{
	__asm__ __volatile__("in %[port], %%al"
		:
		: [port]"d"(port), "D"(arg)
		: "rax");
}
#define GUEST_ASSERT(_condition) do { \
	if (!(_condition)) \
		exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \
} while (0)
/* Report the current IA32_TSC_ADJUST value to L0, then assert the bound. */
static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t adjust;

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	exit_to_l0(PORT_REPORT, adjust);
	GUEST_ASSERT(adjust <= max);
}
static void l2_guest_code(void)
{
	/* L2's RDTSC includes TSC_OFFSET_VALUE; undo it to get L1's TSC. */
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

	/* Unintercepted, so this moves L1's TSC, not L2's. */
	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}
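
/*
 * L1 drives the test: it moves its own TSC back by TSC_ADJUST_VALUE
 * (IA32_TSC_ADJUST should now be at or below -1 * TSC_ADJUST_VALUE),
 * enters VMX operation, verifies that a failed VM-entry leaves the MSR
 * untouched, and finally launches l2_guest_code(), whose unintercepted
 * WRMSR takes it down to -2 * TSC_ADJUST_VALUE.
 */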
static void l1_guest_code(struct vmx_page *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	prepare_for_vmx_operation();

	/* Enter VMX root operation. */
	*(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision();
	GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys));

	/* Load a VMCS. */
	*(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision();
	GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys));
	GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

	/* Jump into L2. First, test failure to load guest CR3. */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));

	/* The failed VM-entry must not have touched IA32_TSC_ADJUST. */
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	vmwrite(GUEST_CR3, save_cr3);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	exit_to_l0(PORT_DONE, 0);
}
static void allocate_vmx_page(struct vmx_page *page)
{
	vm_vaddr_t virt;

	virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0);
	memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE);

	page->virt = virt;
	page->phys = addr_gva2gpa(vm, virt);
}
static vm_vaddr_t allocate_vmx_pages(void)
{
	vm_vaddr_t vmx_pages_vaddr;
	int i;

	vmx_pages_vaddr = vm_vaddr_alloc(
		vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0);

	vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr);

	for (i = 0; i < NUM_VMX_PAGES; i++)
		allocate_vmx_page(&vmx_pages[i]);

	return vmx_pages_vaddr;
}
void report(int64_t val)
{
	printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
	       val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
}
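
/*
 * A passing run reports four values: roughly -1 * TSC_ADJUST_VALUE twice
 * (after L1's WRMSR, then again after the failed VM-entry), followed by
 * roughly -2 * TSC_ADJUST_VALUE twice (from L2, then re-checked by L1
 * after the VMCALL exit).
 */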
int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_vaddr;
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	if (!(entry->ecx & CPUID_VMX)) {
		fprintf(stderr, "nested VMX not enabled, skipping test\n");
		exit(KSFT_SKIP);
	}

	vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);

	/* Allocate VMX pages and shared descriptors (vmx_pages). */
	vmx_pages_vaddr = allocate_vmx_pages();
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr);

	for (;;) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct kvm_regs regs;

		vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		vcpu_regs_get(vm, VCPU_ID, &regs);

		switch (run->io.port) {
		case PORT_ABORT:
			TEST_ASSERT(false, "%s", (const char *) regs.rdi);
			/* NOT REACHED */
		case PORT_REPORT:
			report(regs.rdi);
			break;
		case PORT_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
		}
	}

done:
	kvm_vm_free(vm);
	return 0;
}