mmu.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

static inline u64 rsvd_bits(int s, int e)
{
        if (e < s)
                return 0;

        return ((1ULL << (e - s + 1)) - 1) << s;
}
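
/*
 * Worked example (illustrative, not from the original source): for the
 * reserved physical-address bits 52..62 of a 4-level PTE,
 *
 *      rsvd_bits(52, 62) == ((1ULL << (62 - 52 + 1)) - 1) << 52
 *                        == 0x7ff0000000000000
 *
 * and an empty range such as rsvd_bits(3, 2) yields 0 via the e < s check.
 */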

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                             bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                          u64 fault_address, char *insn, int insn_len);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
        if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
                return kvm->arch.n_max_mmu_pages -
                       kvm->arch.n_used_mmu_pages;

        return 0;
}
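
/*
 * Illustrative note: the comparison above clamps the result at 0 instead of
 * letting the unsigned subtraction wrap; e.g. (hypothetical numbers) with
 * n_max_mmu_pages = 512 and n_used_mmu_pages = 520 this returns 0, not the
 * wrapped value 0xfffffff8.
 */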

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
        BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

        return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
               ? cr3 & X86_CR3_PCID_MASK
               : 0;
}
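
/*
 * Worked example (illustrative): X86_CR3_PCID_MASK is 0xfff, so with
 * CR4.PCIDE = 1 and cr3 = 0x000000012345f005 the PCID is 0x005; with
 * CR4.PCIDE = 0 the result is 0 regardless of the low bits of cr3. The
 * BUILD_BUG_ON guarantees the PCID field lies entirely within the
 * page-offset bits.
 */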

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
        return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
{
        if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
                vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
                                              kvm_get_active_pcid(vcpu));
}
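
/*
 * Illustrative: since root_hpa is page-aligned and the PCID lives entirely
 * in the page-offset bits (see the BUILD_BUG_ON in kvm_get_pcid()), the OR
 * above composes both fields without overlap, e.g. (hypothetical values) a
 * root of 0x000000010a40c000 with active PCID 0x005 yields CR3
 * 0x000000010a40c005.
 */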

/*
 * Currently, we have two sorts of write-protection: a) the first sort
 * write-protects guest pages to sync guest modifications, b) the second sort
 * is used to sync the dirty bitmap for KVM_GET_DIRTY_LOG. The differences
 * between the two sorts are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid
 *    corrupting the shadow page table between vcpus, so it must run under
 *    the protection of mmu-lock. The second case does not need to flush the
 *    TLB until the dirty bitmap is returned to userspace, because it only
 *    write-protects pages logged in the bitmap; since a page in the dirty
 *    bitmap is never missed, it can flush the TLB outside of mmu-lock.
 *
 * So there is a problem: the first case can encounter a stale TLB entry left
 * by the second case, which write-protects pages without flushing the TLB
 * immediately. To make the first case aware of this, we make it flush the
 * TLB whenever it tries to write-protect a spte whose SPTE_MMU_WRITEABLE
 * bit is set. This works because the second case never touches the
 * SPTE_MMU_WRITEABLE bit.
 *
 * Anyway, whenever a spte is updated (only permission and status bits are
 * changed), we need to check whether a spte with SPTE_MMU_WRITEABLE has
 * become read-only; if so, we need to flush the TLB. Fortunately,
 * mmu_spte_update() already handles this perfectly.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether the spte may have a writable TLB entry, or whether the
 *   spte can be made writable in the MMU mapping, check SPTE_MMU_WRITEABLE;
 *   this is the most common case. Otherwise,
 * - when fixing a page fault on the spte, or when write-protecting it for
 *   dirty logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
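
/*
 * Illustrative sketch (hypothetical helper, not part of this header;
 * SPTE_MMU_WRITEABLE itself is defined in mmu.c) of the rules above: the
 * write-protect side decides whether a TLB flush is needed from
 * SPTE_MMU_WRITEABLE, not from PT_WRITABLE_MASK:
 *
 *      static bool wrprot_spte_needs_flush(u64 spte)
 *      {
 *              // A writable TLB entry may exist only while
 *              // SPTE_MMU_WRITEABLE is set, so only then must the
 *              // write-protector flush the TLB.
 *              return spte & SPTE_MMU_WRITEABLE;
 *      }
 */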

static inline int is_writable_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                  unsigned pte_access, unsigned pte_pkey,
                                  unsigned pfec)
{
        int cpl = kvm_x86_ops->get_cpl(vcpu);
        unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

        /*
         * If CPL < 3, SMAP prevention is disabled if EFLAGS.AC = 1.
         *
         * If CPL = 3, SMAP applies to all supervisor-mode data accesses
         * (these are implicit supervisor accesses) regardless of the value
         * of EFLAGS.AC.
         *
         * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
         * the result in X86_EFLAGS_AC. We then insert it in place of
         * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
         * but it will be one in index if SMAP checks are being overridden.
         * It is important to keep this branchless.
         */
        unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
        int index = (pfec >> 1) +
                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
        bool fault = (mmu->permissions[index] >> pte_access) & 1;
        u32 errcode = PFERR_PRESENT_MASK;

        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
        if (unlikely(mmu->pkru_mask)) {
                u32 pkru_bits, offset;

                /*
                 * PKRU defines 32 bits: there are 16 domains and 2
                 * attribute bits per domain in pkru. pte_pkey is the
                 * index of the protection domain, so pte_pkey * 2 is
                 * the index of the first bit for the domain.
                 */
                pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

                /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
                offset = (pfec & ~1) +
                        ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

                pkru_bits &= mmu->pkru_mask >> offset;
                errcode |= -pkru_bits & PFERR_PK_MASK;
                fault |= (pkru_bits != 0);
        }

        return -(u32)fault & errcode;
}
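
/*
 * Worked example (illustrative) of the branchless SMAP override above,
 * assuming the x86 values X86_EFLAGS_AC_BIT == 18 and PFERR_RSVD_BIT == 3:
 * for a CPL-0 access with EFLAGS.AC = 1,
 *
 *      smap  = (0 - 3) & (1UL << 18)            == 1UL << 18
 *      index = (pfec >> 1) + (smap >> (18 - 3 + 1))
 *            = (pfec >> 1) + (1 << 2)
 *
 * so the override lands in the (always-zero, see the WARN_ON) RSVD slot of
 * the permissions[] index. For CPL 3, (cpl - 3) == 0 and smap == 0, so the
 * index is just pfec >> 1.
 */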

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

#endif