kvm_book3s_64.h

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
        return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
        return false;
}
#endif

/*
 * Structure for a nested guest, that is, for a guest that is managed by
 * one of our guests.
 */
struct kvm_nested_guest {
        struct kvm *l1_host;            /* L1 VM that owns this nested guest */
        int l1_lpid;                    /* lpid L1 guest thinks this guest is */
        int shadow_lpid;                /* real lpid of this nested guest */
        pgd_t *shadow_pgtable;          /* our page table for this guest */
        u64 l1_gr_to_hr;                /* L1's addr of part'n-scoped table */
        u64 process_table;              /* process table entry for this guest */
        long refcnt;                    /* number of pointers to this struct */
        struct mutex tlb_lock;          /* serialize page faults and tlbies */
        struct kvm_nested_guest *next;
        cpumask_t need_tlb_flush;
        cpumask_t cpu_in_guest;
        short prev_cpu[NR_CPUS];
};

/*
 * We define a nested rmap entry as a single 64-bit quantity
 * 0xFFF0000000000000   12-bit lpid field
 * 0x000FFFFFFFFFF000   40-bit guest 4k page frame number
 * 0x0000000000000001   1-bit  single entry flag
 */
#define RMAP_NESTED_LPID_MASK           0xFFF0000000000000UL
#define RMAP_NESTED_LPID_SHIFT          (52)
#define RMAP_NESTED_GPA_MASK            0x000FFFFFFFFFF000UL
#define RMAP_NESTED_IS_SINGLE_ENTRY     0x0000000000000001UL
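
/*
 * Example (illustrative only, not used elsewhere in this file): given a
 * nested rmap value n_rmap, the fields defined above unpack as
 *      lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
 *      gpa  = n_rmap & RMAP_NESTED_GPA_MASK;
 * and a value that covers only a single mapping additionally has
 * RMAP_NESTED_IS_SINGLE_ENTRY set.
 */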

/* Structure for a nested guest rmap entry */
struct rmap_nested {
        struct llist_node list;
        u64 rmap;
};

/*
 * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
 *                           safe against removal of the list entry or NULL list
 * @pos:        a (struct rmap_nested *) to use as a loop cursor
 * @node:       pointer to the first entry
 *              NOTE: this can be NULL
 * @rmapp:      an (unsigned long *) in which to return the rmap entries on each
 *              iteration
 *              NOTE: this must point to already allocated memory
 *
 * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
 * rmap entry in the memslot. The list is always terminated by a "single entry"
 * stored in the list element of the final entry of the llist. If there is ONLY
 * a single entry then this is itself in the rmap entry of the memslot, not a
 * llist head pointer.
 *
 * Note that the iterator below assumes that a nested rmap entry is always
 * non-zero.  This is true for our usage because the LPID field is always
 * non-zero (zero is reserved for the host).
 *
 * This should be used to iterate over the list of rmap_nested entries with
 * processing done on the u64 rmap value given by each iteration. This is safe
 * against removal of list entries and it is always safe to call free on (pos).
 *
 * e.g.
 * struct rmap_nested *cursor;
 * struct llist_node *first;
 * unsigned long rmap;
 * for_each_nest_rmap_safe(cursor, first, &rmap) {
 *      do_something(rmap);
 *      free(cursor);
 * }
 */
#define for_each_nest_rmap_safe(pos, node, rmapp)                              \
        for ((pos) = llist_entry((node), typeof(*(pos)), list);                \
             (node) &&                                                         \
             (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?     \
                          ((u64) (node)) : ((pos)->rmap))) &&                  \
             (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?      \
                         ((struct llist_node *) ((pos) = NULL)) :              \
                         (pos)->list.next)), true);                            \
             (pos) = llist_entry((node), typeof(*(pos)), list))

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
                                          bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);

/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r)     (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
                                         ___PPC_R(r))

/* Power architecture requires HPT is at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER       18
#define PPC_MAX_HPT_ORDER       46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static inline bool kvm_is_radix(struct kvm *kvm)
{
        return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER   24      /* 16MB HPT by default */
#endif

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK   0x40UL
#define HPTE_V_ABSENT   0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED        (1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED        HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
        unsigned long tmp, old;
        __be64 be_lockbit, be_bits;

        /*
         * We load/store in native endian, but the HTAB is in big endian. If
         * we byte swap all data we apply on the PTE we're implicitly correct
         * again.
         */
        be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
        be_bits = cpu_to_be64(bits);

        asm volatile("  ldarx   %0,0,%2\n"
                     "  and.    %1,%0,%3\n"
                     "  bne     2f\n"
                     "  or      %0,%0,%4\n"
                     "  stdcx.  %0,0,%2\n"
                     "  beq+    2f\n"
                     "  mr      %1,%3\n"
                     "2:        isync"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
                     : "cc", "memory");
        return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
        hpte_v &= ~HPTE_V_HVLOCK;
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
        hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
        hpte_v &= ~HPTE_V_HVLOCK;
        hpte[0] = cpu_to_be64(hpte_v);
}
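
/*
 * Typical (illustrative) usage of the HPTE lock bit defined above: take the
 * lock with try_lock_hpte(), update the HPTE, then publish the new dword 0
 * with unlock_hpte() (or __unlock_hpte() when no release barrier is needed):
 *
 *      while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *              cpu_relax();
 *      ... examine or modify the HPTE ...
 *      unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 */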

/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
        unsigned int lphi;

        if (!(h & HPTE_V_LARGE))
                return 12;      /* 4kB */
        lphi = (l >> 16) & 0xf;
        switch ((l >> 12) & 0xf) {
        case 0:
                return !lphi ? 24 : 0;          /* 16MB */
        case 1:
                return 16;                      /* 64kB */
        case 3:
                return !lphi ? 34 : 0;          /* 16GB */
        case 7:
                return (16 << 8) + 12;          /* 64kB in 4kB */
        case 8:
                if (!lphi)
                        return (24 << 8) + 16;  /* 16MB in 64kB */
                if (lphi == 3)
                        return (24 << 8) + 12;  /* 16MB in 4kB */
                break;
        }
        return 0;
}

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
        return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
        int tmp = kvmppc_hpte_page_shifts(h, l);

        if (tmp >= 0x100)
                tmp >>= 8;
        return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
        int shift = kvmppc_hpte_actual_page_shift(v, r);

        if (shift)
                return 1ul << shift;
        return 0;
}
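
/*
 * Worked example (illustrative): for a large-page HPTE with a 64kB base page
 * backed by a 16MB actual page, kvmppc_hpte_page_shifts() returns
 * (24 << 8) + 16 = 0x1810, so kvmppc_hpte_base_page_shift() gives 16,
 * kvmppc_hpte_actual_page_shift() gives 24, and kvmppc_actual_pgsz()
 * gives 1ul << 24 (16MB).
 */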

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
        switch (base_shift) {
        case 12:
                switch (actual_shift) {
                case 12:
                        return 0;
                case 16:
                        return 7;
                case 24:
                        return 0x38;
                }
                break;
        case 16:
                switch (actual_shift) {
                case 16:
                        return 1;
                case 24:
                        return 8;
                }
                break;
        case 24:
                return 0;
        }
        return -1;
}
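
/*
 * Example (illustrative): kvmppc_pgsize_lp_encoding(12, 24) returns 0x38,
 * the LP encoding for a 16MB page mapped with a 4kB base page size.  Placed
 * in bits 12..19 of the second HPTE doubleword (the LP mask used in
 * compute_tlbie_rb() below), that encoding is decoded by
 * kvmppc_hpte_page_shifts() back to (24 << 8) + 12.
 */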

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
                                             unsigned long pte_index)
{
        int a_pgshift, b_pgshift;
        unsigned long rb = 0, va_low, sllp;

        b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
        if (a_pgshift >= 0x100) {
                b_pgshift &= 0xff;
                a_pgshift >>= 8;
        }

        /*
         * Ignore the top 14 bits of va.
         * v has its top two bits covering segment size, hence move
         * by 16 bits.  Also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
         * The AVA field in v also has its lower 23 bits ignored.
         * For base page size 4K we need bits 14..65 (so we need to
         * collect an extra 11 bits); for others we need 14..14+i.
         */
        /* This covers bits 14..54 of va */
        rb = (v & ~0x7fUL) << 16;               /* AVA field */

        /*
         * The AVA in v has its lower 23 bits cleared.  We need to derive
         * those from the pteg index.
         */
        va_low = pte_index >> 3;
        if (v & HPTE_V_SECONDARY)
                va_low = ~va_low;
        /*
         * Get the vpn bits from va_low using the reverse of hashing.
         * In v we have the va with 23 bits dropped and then left shifted
         * by HPTE_V_AVPN_SHIFT (7) bits.  To find the vsid we need to
         * right shift it by (SID_SHIFT - (23 - 7)).
         */
        if (!(v & HPTE_V_1TB_SEG))
                va_low ^= v >> (SID_SHIFT - 16);
        else
                va_low ^= v >> (SID_SHIFT_1T - 16);
        va_low &= 0x7ff;

        if (b_pgshift <= 12) {
                if (a_pgshift > 12) {
                        sllp = (a_pgshift == 16) ? 5 : 4;
                        rb |= sllp << 5;        /* AP field */
                }
                rb |= (va_low & 0x7ff) << 12;   /* remaining 11 bits of AVA */
        } else {
                int aval_shift;
                /*
                 * Remaining bits of the AVA/LP fields;
                 * these also contain the rr bits of LP.
                 */
                rb |= (va_low << b_pgshift) & 0x7ff000;
                /*
                 * Now clear the LP bits that are not needed for the
                 * actual page size.
                 */
                rb &= ~((1ul << a_pgshift) - 1);
                /*
                 * The AVAL field gets bits 58..(77 - base_page_shift) of va;
                 * we have space for bits 58..64, so missing bits should be
                 * zero filled.  The +1 takes care of the L bit shift.
                 */
                aval_shift = 64 - (77 - b_pgshift) + 1;
                rb |= ((va_low << aval_shift) & 0xfe);
                rb |= 1;                /* L field */
                rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
        }
        rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;   /* B field */
        return rb;
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
        unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

        return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
        if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
                ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
        else
                ptel |= PP_RXRX;
        return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
        unsigned int wimg = hptel & HPTE_R_WIMG;

        /* Handle SAO */
        if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
            cpu_has_feature(CPU_FTR_ARCH_206))
                wimg = HPTE_R_M;

        if (!is_ci)
                return wimg == HPTE_R_M;
        /*
         * If the host mapping is cache-inhibited, make sure the hptel is
         * cache-inhibited as well.
         */
        if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
                return false;
        return !!(wimg & HPTE_R_I);
}
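
/*
 * Summary (illustrative) of what hpte_cache_flags_ok() accepts:
 *      host mapping normal (is_ci == false):  only WIMG == M (SAO, i.e.
 *                                             W|I|M, is treated as M on
 *                                             CPU_FTR_ARCH_206 and later)
 *      host mapping cache-inhibited:          WIMG must have I set and must
 *                                             not have W set
 */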

/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
        pte_t old_pte, new_pte = __pte(0);

        while (1) {
                /*
                 * Make sure we don't reload from ptep
                 */
                old_pte = READ_ONCE(*ptep);
                /*
                 * wait until H_PAGE_BUSY is clear then set it atomically
                 */
                if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
                        cpu_relax();
                        continue;
                }
                /* If the pte is not present, return an empty pte */
                if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
                        return __pte(0);

                new_pte = pte_mkyoung(old_pte);
                if (writing && pte_write(old_pte))
                        new_pte = pte_mkdirty(new_pte);

                if (pte_xchg(ptep, old_pte, new_pte))
                        break;
        }
        return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return PP_RWRX <= pp && pp <= PP_RXRX;
        return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return pp == PP_RWRW;
        return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
        unsigned long skey;

        skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
                ((hpte_r & HPTE_R_KEY_LO) >> 9);
        return (amr >> (62 - 2 * skey)) & 3;
}
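
/*
 * Example (illustrative): hpte_get_skey_perm() returns the 2-bit AMR field
 * for the HPTE's storage key, so key 0 selects the top two bits of the AMR
 * and key 31 selects the bottom two bits.
 */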

static inline void lock_rmap(unsigned long *rmap)
{
        do {
                while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
                        cpu_relax();
        } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
        __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
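
/*
 * Typical (illustrative) usage: the rmap lock is a bit spinlock, so callers
 * bracket updates of the rmap chain with
 *      lock_rmap(rmapp);
 *      ... modify the rmap entry or chain ...
 *      unlock_rmap(rmapp);
 */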

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
                                   unsigned long pagesize)
{
        unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

        if (pagesize <= PAGE_SIZE)
                return true;
        return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
        unsigned long senc = 0;

        if (psize > 0x1000) {
                senc = SLB_VSID_L;
                if (psize == 0x10000)
                        senc |= SLB_VSID_LP_01;
        }
        return senc;
}
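
/*
 * Example (illustrative): slb_pgsize_encoding(0x1000) returns 0 (4k needs no
 * L/LP bits), slb_pgsize_encoding(0x10000) returns SLB_VSID_L | SLB_VSID_LP_01
 * (64k), and slb_pgsize_encoding(0x1000000) returns SLB_VSID_L (16M).
 */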

static inline int is_vrma_hpte(unsigned long hpte_v)
{
        return (hpte_v & ~0xffffffUL) ==
                (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
                                          struct revmap_entry *rev)
{
        if (atomic_read(&kvm->arch.hpte_mod_interest))
                rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
        return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
extern void kvmhv_radix_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
        /* HPTEs are 2**4 bytes long */
        return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
        /* 128 (2**7) bytes in each HPTEG */
        return (1UL << (hpt->order - 7)) - 1;
}
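
/*
 * Worked example (illustrative): with the default HPT order of 24 (a 16MB
 * HPT), kvmppc_hpt_npte() gives 1UL << 20 HPTEs and kvmppc_hpt_mask() gives
 * (1UL << 17) - 1, the hash mask for 2**17 HPTE groups of 8 entries each.
 */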

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
                                  unsigned long npages)
{
        if (npages >= 8)
                memset((char *)map + i / 8, 0xff, npages / 8);
        else
                for (; npages; ++i, --npages)
                        __set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
                                         unsigned long npages)
{
        if (npages >= 8)
                memset((char *)map + i / 8, 0xff, npages / 8);
        else
                for (; npages; ++i, --npages)
                        set_bit_le(i, map);
}

static inline u64 sanitize_msr(u64 msr)
{
        msr &= ~MSR_HV;
        msr |= MSR_ME;
        return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
        vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
        vcpu->arch.regs.xer = vcpu->arch.xer_tm;
        vcpu->arch.regs.link = vcpu->arch.lr_tm;
        vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
        vcpu->arch.amr = vcpu->arch.amr_tm;
        vcpu->arch.ppr = vcpu->arch.ppr_tm;
        vcpu->arch.dscr = vcpu->arch.dscr_tm;
        vcpu->arch.tar = vcpu->arch.tar_tm;
        memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
               sizeof(vcpu->arch.regs.gpr));
        vcpu->arch.fp = vcpu->arch.fp_tm;
        vcpu->arch.vr = vcpu->arch.vr_tm;
        vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
        vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
        vcpu->arch.xer_tm = vcpu->arch.regs.xer;
        vcpu->arch.lr_tm = vcpu->arch.regs.link;
        vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
        vcpu->arch.amr_tm = vcpu->arch.amr;
        vcpu->arch.ppr_tm = vcpu->arch.ppr;
        vcpu->arch.dscr_tm = vcpu->arch.dscr;
        vcpu->arch.tar_tm = vcpu->arch.tar;
        memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
               sizeof(vcpu->arch.regs.gpr));
        vcpu->arch.fp_tm = vcpu->arch.fp;
        vcpu->arch.vr_tm = vcpu->arch.vr;
        vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
                             unsigned long gpa, unsigned int level,
                             unsigned long mmu_seq, unsigned int lpid,
                             unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
                                   struct rmap_nested **n_rmap);
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                unsigned long gpa, unsigned long hpa,
                                unsigned long nbytes);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */