@@ -175,6 +175,7 @@ static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
 static u64 __read_mostly shadow_mmio_mask;
+static u64 __read_mostly shadow_present_mask;
 
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static void mmu_free_roots(struct kvm_vcpu *vcpu);
@@ -282,13 +283,14 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 }
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask)
 {
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
 	shadow_dirty_mask = dirty_mask;
 	shadow_nx_mask = nx_mask;
 	shadow_x_mask = x_mask;
+	shadow_present_mask = p_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -2245,10 +2247,9 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
 {
 	u64 spte;
 
-	BUILD_BUG_ON(VMX_EPT_READABLE_MASK != PT_PRESENT_MASK ||
-		     VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
+	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
 
-	spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
+	spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
 	       shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
 
 	mmu_spte_set(sptep, spte);
@@ -2515,13 +2516,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
 		    bool can_unsync, bool host_writable)
 {
-	u64 spte;
+	u64 spte = 0;
 	int ret = 0;
 
 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
 		return 0;
 
-	spte = PT_PRESENT_MASK;
+	spte |= shadow_present_mask;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
 