
/*
 * tools/testing/selftests/kvm/lib/x86.c
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
#include "x86.h"

/* Minimum physical address used for virtual translation tables. */
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
  15. /* Virtual translation table structure declarations */
  16. struct pageMapL4Entry {
  17. uint64_t present:1;
  18. uint64_t writable:1;
  19. uint64_t user:1;
  20. uint64_t write_through:1;
  21. uint64_t cache_disable:1;
  22. uint64_t accessed:1;
  23. uint64_t ignored_06:1;
  24. uint64_t page_size:1;
  25. uint64_t ignored_11_08:4;
  26. uint64_t address:40;
  27. uint64_t ignored_62_52:11;
  28. uint64_t execute_disable:1;
  29. };
  30. struct pageDirectoryPointerEntry {
  31. uint64_t present:1;
  32. uint64_t writable:1;
  33. uint64_t user:1;
  34. uint64_t write_through:1;
  35. uint64_t cache_disable:1;
  36. uint64_t accessed:1;
  37. uint64_t ignored_06:1;
  38. uint64_t page_size:1;
  39. uint64_t ignored_11_08:4;
  40. uint64_t address:40;
  41. uint64_t ignored_62_52:11;
  42. uint64_t execute_disable:1;
  43. };
  44. struct pageDirectoryEntry {
  45. uint64_t present:1;
  46. uint64_t writable:1;
  47. uint64_t user:1;
  48. uint64_t write_through:1;
  49. uint64_t cache_disable:1;
  50. uint64_t accessed:1;
  51. uint64_t ignored_06:1;
  52. uint64_t page_size:1;
  53. uint64_t ignored_11_08:4;
  54. uint64_t address:40;
  55. uint64_t ignored_62_52:11;
  56. uint64_t execute_disable:1;
  57. };
  58. struct pageTableEntry {
  59. uint64_t present:1;
  60. uint64_t writable:1;
  61. uint64_t user:1;
  62. uint64_t write_through:1;
  63. uint64_t cache_disable:1;
  64. uint64_t accessed:1;
  65. uint64_t dirty:1;
  66. uint64_t reserved_07:1;
  67. uint64_t global:1;
  68. uint64_t ignored_11_09:3;
  69. uint64_t address:40;
  70. uint64_t ignored_62_52:11;
  71. uint64_t execute_disable:1;
  72. };
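
/* Each table level above is indexed by a 9-bit slice of the 48-bit guest
 * virtual address, with the low 12 bits selecting the byte within a 4 KiB
 * page.  A minimal sketch of that decomposition (the helper below is
 * illustrative only, not part of this library; the same shifts appear in
 * virt_pg_map() and addr_gva2gpa() further down):
 *
 *        static inline uint16_t va_index(uint64_t vaddr, int level)
 *        {
 *                // level 0 = page table, 1 = page directory,
 *                // level 2 = page directory pointer, 3 = page map level 4
 *                return (vaddr >> (12 + 9 * level)) & 0x1ffu;
 *        }
 */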

/* Register Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   regs - Register state to dump
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the registers given by regs, to the FILE stream
 * given by stream.
 */
void regs_dump(FILE *stream, struct kvm_regs *regs,
               uint8_t indent)
{
        fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
                "rcx: 0x%.16llx rdx: 0x%.16llx\n",
                indent, "",
                regs->rax, regs->rbx, regs->rcx, regs->rdx);
        fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
                "rsp: 0x%.16llx rbp: 0x%.16llx\n",
                indent, "",
                regs->rsi, regs->rdi, regs->rsp, regs->rbp);
        fprintf(stream, "%*sr8:  0x%.16llx r9:  0x%.16llx "
                "r10: 0x%.16llx r11: 0x%.16llx\n",
                indent, "",
                regs->r8, regs->r9, regs->r10, regs->r11);
        fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
                "r14: 0x%.16llx r15: 0x%.16llx\n",
                indent, "",
                regs->r12, regs->r13, regs->r14, regs->r15);
        fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
                indent, "",
                regs->rip, regs->rflags);
}

/* Segment Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   segment - KVM segment
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the KVM segment given by segment, to the FILE stream
 * given by stream.
 */
static void segment_dump(FILE *stream, struct kvm_segment *segment,
                         uint8_t indent)
{
        fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
                "selector: 0x%.4x type: 0x%.2x\n",
                indent, "", segment->base, segment->limit,
                segment->selector, segment->type);
        fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
                "db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
                indent, "", segment->present, segment->dpl,
                segment->db, segment->s, segment->l);
        fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
                "unusable: 0x%.2x padding: 0x%.2x\n",
                indent, "", segment->g, segment->avl,
                segment->unusable, segment->padding);
}

/* dtable Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   dtable - KVM dtable
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the KVM dtable given by dtable, to the FILE stream
 * given by stream.
 */
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
                        uint8_t indent)
{
        fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
                "padding: 0x%.4x 0x%.4x 0x%.4x\n",
                indent, "", dtable->base, dtable->limit,
                dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}

/* System Register Dump
 *
 * Input Args:
 *   indent - Left margin indent amount
 *   sregs - System registers
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the state of the system registers given by sregs, to the FILE stream
 * given by stream.
 */
void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
                uint8_t indent)
{
        unsigned int i;

        fprintf(stream, "%*scs:\n", indent, "");
        segment_dump(stream, &sregs->cs, indent + 2);
        fprintf(stream, "%*sds:\n", indent, "");
        segment_dump(stream, &sregs->ds, indent + 2);
        fprintf(stream, "%*ses:\n", indent, "");
        segment_dump(stream, &sregs->es, indent + 2);
        fprintf(stream, "%*sfs:\n", indent, "");
        segment_dump(stream, &sregs->fs, indent + 2);
        fprintf(stream, "%*sgs:\n", indent, "");
        segment_dump(stream, &sregs->gs, indent + 2);
        fprintf(stream, "%*sss:\n", indent, "");
        segment_dump(stream, &sregs->ss, indent + 2);
        fprintf(stream, "%*str:\n", indent, "");
        segment_dump(stream, &sregs->tr, indent + 2);
        fprintf(stream, "%*sldt:\n", indent, "");
        segment_dump(stream, &sregs->ldt, indent + 2);

        fprintf(stream, "%*sgdt:\n", indent, "");
        dtable_dump(stream, &sregs->gdt, indent + 2);
        fprintf(stream, "%*sidt:\n", indent, "");
        dtable_dump(stream, &sregs->idt, indent + 2);

        fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
                "cr3: 0x%.16llx cr4: 0x%.16llx\n",
                indent, "",
                sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
        fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
                "apic_base: 0x%.16llx\n",
                indent, "",
                sregs->cr8, sregs->efer, sregs->apic_base);

        fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
        for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
                fprintf(stream, "%*s%.16llx\n", indent + 2, "",
                        sregs->interrupt_bitmap[i]);
        }
}

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
        TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        /* If needed, create page map l4 table. */
        if (!vm->pgd_created) {
                vm_paddr_t paddr = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
                vm->pgd = paddr;

                /* Set pointer to pgd tables in all the VCPUs that
                 * have already been created.  Future VCPUs will have
                 * the value set as each one is created.
                 */
                for (struct vcpu *vcpu = vm->vcpu_head; vcpu;
                     vcpu = vcpu->next) {
                        struct kvm_sregs sregs;

                        /* Obtain the current system register settings */
                        vcpu_sregs_get(vm, vcpu->id, &sregs);

                        /* Set and store the pointer to the start of the
                         * pgd tables.
                         */
                        sregs.cr3 = vm->pgd;
                        vcpu_sregs_set(vm, vcpu->id, &sregs);
                }

                vm->pgd_created = true;
        }
}

/* VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by vm, creates a virtual translation for the page
 * starting at vaddr to the page starting at paddr.
 */
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                 uint32_t pgd_memslot)
{
        uint16_t index[4];
        struct pageMapL4Entry *pml4e;

        TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        TEST_ASSERT((vaddr % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
                "  vaddr: 0x%lx vm->page_size: 0x%x",
                vaddr, vm->page_size);
        TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
                (vaddr >> vm->page_shift)),
                "Invalid virtual address, vaddr: 0x%lx",
                vaddr);
        TEST_ASSERT((paddr % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x",
                paddr, vm->page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                "Physical address beyond maximum supported,\n"
                "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                paddr, vm->max_gfn, vm->page_size);
        index[0] = (vaddr >> 12) & 0x1ffu;
        index[1] = (vaddr >> 21) & 0x1ffu;
        index[2] = (vaddr >> 30) & 0x1ffu;
        index[3] = (vaddr >> 39) & 0x1ffu;

        /* Allocate page directory pointer table if not present. */
        pml4e = addr_gpa2hva(vm, vm->pgd);
        if (!pml4e[index[3]].present) {
                pml4e[index[3]].address = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
                        >> vm->page_shift;
                pml4e[index[3]].writable = true;
                pml4e[index[3]].present = true;
        }

        /* Allocate page directory table if not present. */
        struct pageDirectoryPointerEntry *pdpe;
        pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
        if (!pdpe[index[2]].present) {
                pdpe[index[2]].address = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
                        >> vm->page_shift;
                pdpe[index[2]].writable = true;
                pdpe[index[2]].present = true;
        }

        /* Allocate page table if not present. */
        struct pageDirectoryEntry *pde;
        pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
        if (!pde[index[1]].present) {
                pde[index[1]].address = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
                        >> vm->page_shift;
                pde[index[1]].writable = true;
                pde[index[1]].present = true;
        }

        /* Fill in page table entry. */
        struct pageTableEntry *pte;
        pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
        pte[index[0]].address = paddr >> vm->page_shift;
        pte[index[0]].writable = true;
        pte[index[0]].present = 1;
}
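
/* Illustrative usage (a sketch, not part of the original file): identity-map
 * a single 4 KiB page once the top-level table exists.  The address and the
 * memslot value are made up for the example; memslot 0 is assumed to hold
 * the translation tables.
 *
 *        virt_pgd_alloc(vm, 0);
 *        virt_pg_map(vm, 0x400000, 0x400000, 0);
 */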

/* Virtual Translation Tables Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps to the FILE stream given by stream, the contents of all the
 * virtual translation tables for the VM given by vm.
 */
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
        struct pageMapL4Entry *pml4e, *pml4e_start;
        struct pageDirectoryPointerEntry *pdpe, *pdpe_start;
        struct pageDirectoryEntry *pde, *pde_start;
        struct pageTableEntry *pte, *pte_start;

        if (!vm->pgd_created)
                return;

        fprintf(stream, "%*s                                          "
                "                no\n", indent, "");
        fprintf(stream, "%*s      index hvaddr         gpaddr         "
                "addr         w exec dirty\n",
                indent, "");
        pml4e_start = (struct pageMapL4Entry *) addr_gpa2hva(vm,
                vm->pgd);
        for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
                pml4e = &pml4e_start[n1];
                if (!pml4e->present)
                        continue;
                fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
                        " %u\n",
                        indent, "",
                        pml4e - pml4e_start, pml4e,
                        addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->address,
                        pml4e->writable, pml4e->execute_disable);

                pdpe_start = addr_gpa2hva(vm, pml4e->address
                        * vm->page_size);
                for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
                        pdpe = &pdpe_start[n2];
                        if (!pdpe->present)
                                continue;
                        fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10lx "
                                "%u  %u\n",
                                indent, "",
                                pdpe - pdpe_start, pdpe,
                                addr_hva2gpa(vm, pdpe),
                                (uint64_t) pdpe->address, pdpe->writable,
                                pdpe->execute_disable);

                        pde_start = addr_gpa2hva(vm,
                                pdpe->address * vm->page_size);
                        for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
                                pde = &pde_start[n3];
                                if (!pde->present)
                                        continue;
                                fprintf(stream, "%*spde   0x%-3zx %p "
                                        "0x%-12lx 0x%-10lx %u  %u\n",
                                        indent, "", pde - pde_start, pde,
                                        addr_hva2gpa(vm, pde),
                                        (uint64_t) pde->address, pde->writable,
                                        pde->execute_disable);

                                pte_start = addr_gpa2hva(vm,
                                        pde->address * vm->page_size);
                                for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
                                        pte = &pte_start[n4];
                                        if (!pte->present)
                                                continue;
                                        fprintf(stream, "%*spte   0x%-3zx %p "
                                                "0x%-12lx 0x%-10lx %u  %u "
                                                "    %u    0x%-10lx\n",
                                                indent, "",
                                                pte - pte_start, pte,
                                                addr_hva2gpa(vm, pte),
                                                (uint64_t) pte->address,
                                                pte->writable,
                                                pte->execute_disable,
                                                pte->dirty,
                                                ((uint64_t) n1 << 27)
                                                | ((uint64_t) n2 << 18)
                                                | ((uint64_t) n3 << 9)
                                                | ((uint64_t) n4));
                                }
                        }
                }
        }
}

/* Set Unusable Segment
 *
 * Input Args: None
 *
 * Output Args:
 *   segp - Pointer to segment register
 *
 * Return: None
 *
 * Sets the segment register pointed to by segp to an unusable state.
 */
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
        memset(segp, 0, sizeof(*segp));
        segp->unusable = true;
}

/* Set Long Mode Flat Kernel Code Segment
 *
 * Input Args:
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by segp, to be a code segment
 * with the selector value given by selector.
 */
static void kvm_seg_set_kernel_code_64bit(uint16_t selector,
        struct kvm_segment *segp)
{
        memset(segp, 0, sizeof(*segp));
        segp->selector = selector;
        segp->limit = 0xFFFFFFFFu;
        segp->s = 0x1; /* kTypeCodeData */
        segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
                                          * | kFlagCodeReadable
                                          */
        segp->g = true;
        segp->l = true;
        segp->present = 1;
}

/* Set Long Mode Flat Kernel Data Segment
 *
 * Input Args:
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by segp, to be a data segment
 * with the selector value given by selector.
 */
static void kvm_seg_set_kernel_data_64bit(uint16_t selector,
        struct kvm_segment *segp)
{
        memset(segp, 0, sizeof(*segp));
        segp->selector = selector;
        segp->limit = 0xFFFFFFFFu;
        segp->s = 0x1; /* kTypeCodeData */
        segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
                                          * | kFlagDataWritable
                                          */
        segp->g = true;
        segp->present = true;
}

/* Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Walks the virtual translation tables of the VM given by vm and
 * translates the VM virtual address given by gva to the equivalent
 * VM physical address.  A TEST_ASSERT failure occurs if no mapping
 * exists for the VM virtual address.
 */
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
        uint16_t index[4];
        struct pageMapL4Entry *pml4e;
        struct pageDirectoryPointerEntry *pdpe;
        struct pageDirectoryEntry *pde;
        struct pageTableEntry *pte;

        TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        index[0] = (gva >> 12) & 0x1ffu;
        index[1] = (gva >> 21) & 0x1ffu;
        index[2] = (gva >> 30) & 0x1ffu;
        index[3] = (gva >> 39) & 0x1ffu;

        if (!vm->pgd_created)
                goto unmapped_gva;

        pml4e = addr_gpa2hva(vm, vm->pgd);
        if (!pml4e[index[3]].present)
                goto unmapped_gva;

        pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
        if (!pdpe[index[2]].present)
                goto unmapped_gva;

        pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
        if (!pde[index[1]].present)
                goto unmapped_gva;

        pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
        if (!pte[index[0]].present)
                goto unmapped_gva;

        return (pte[index[0]].address * vm->page_size) + (gva & 0xfffu);

unmapped_gva:
        TEST_ASSERT(false, "No mapping for vm virtual address, "
                "gva: 0x%lx", gva);
}
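
/* Illustrative round trip (a sketch, not part of the original file): after
 * virt_pg_map(vm, gva, gpa, 0), any offset within that page translates back
 * to the same offset within the physical page, since the low 12 bits are
 * carried through unchanged:
 *
 *        TEST_ASSERT(addr_gva2gpa(vm, gva + 0x10) == gpa + 0x10,
 *                "Unexpected gva -> gpa translation");
 */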

void vcpu_setup(struct kvm_vm *vm, int vcpuid)
{
        struct kvm_sregs sregs;

        /* Set mode specific system register values. */
        vcpu_sregs_get(vm, vcpuid, &sregs);
        switch (vm->mode) {
        case VM_MODE_FLAT48PG:
                sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
                sregs.cr4 |= X86_CR4_PAE;
                sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);

                kvm_seg_set_unusable(&sregs.ldt);
                kvm_seg_set_kernel_code_64bit(0x8, &sregs.cs);
                kvm_seg_set_kernel_data_64bit(0x10, &sregs.ds);
                kvm_seg_set_kernel_data_64bit(0x10, &sregs.es);
                break;

        default:
                TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
        }
        vcpu_sregs_set(vm, vcpuid, &sregs);

        /* If the virtual translation tables have already been set up, point
         * CR3 at them.  It's okay if they haven't been set up yet: the code
         * that creates the virtual translation tables goes back through any
         * VCPUs that already exist and sets their CR3 values.
         */
        if (vm->pgd_created) {
                struct kvm_sregs sregs;

                vcpu_sregs_get(vm, vcpuid, &sregs);
                sregs.cr3 = vm->pgd;
                vcpu_sregs_set(vm, vcpuid, &sregs);
        }
}

/* Adds a vCPU with reasonable defaults (i.e., a stack)
 *
 * Input Args:
 *   vcpuid - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
        struct kvm_mp_state mp_state;
        struct kvm_regs regs;
        vm_vaddr_t stack_vaddr;

        stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
                                     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);

        /* Create VCPU */
        vm_vcpu_add(vm, vcpuid);

        /* Setup guest general purpose registers */
        vcpu_regs_get(vm, vcpuid, &regs);
        regs.rflags = regs.rflags | 0x2;
        regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
        regs.rip = (unsigned long) guest_code;
        vcpu_regs_set(vm, vcpuid, &regs);

        /* Setup the MP state */
        mp_state.mp_state = 0;
        vcpu_set_mp_state(vm, vcpuid, &mp_state);
}

/* VM VCPU CPUID Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU id
 *   cpuid - The CPUID values to set.
 *
 * Output Args: None
 *
 * Return: void
 *
 * Set the VCPU's CPUID.
 */
void vcpu_set_cpuid(struct kvm_vm *vm,
                    uint32_t vcpuid, struct kvm_cpuid2 *cpuid)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int rc;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        rc = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
        TEST_ASSERT(rc == 0, "KVM_SET_CPUID2 failed, rc: %i errno: %i",
                    rc, errno);
}

/* Create a VM with reasonable defaults
 *
 * Input Args:
 *   vcpuid - The id of the single VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 */
struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code)
{
        struct kvm_vm *vm;

        /* Create VM */
        vm = vm_create(VM_MODE_FLAT48PG, DEFAULT_GUEST_PHY_PAGES, O_RDWR);

        /* Setup guest code */
        kvm_vm_elf_load(vm, program_invocation_name, 0, 0);

        /* Setup IRQ Chip */
        vm_create_irqchip(vm);

        /* Add the first vCPU. */
        vm_vcpu_add_default(vm, vcpuid, guest_code);

        return vm;
}
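
/* Illustrative test skeleton (a sketch, not part of the original file).  It
 * assumes vcpu_run() and kvm_vm_free() from kvm_util.h and a guest_main()
 * entry point supplied by the individual test:
 *
 *        #define VCPU_ID 0
 *
 *        int main(void)
 *        {
 *                struct kvm_vm *vm;
 *
 *                vm = vm_create_default(VCPU_ID, guest_main);
 *                vcpu_run(vm, VCPU_ID);
 *                kvm_vm_free(vm);
 *                return 0;
 *        }
 */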