@@ -183,6 +183,7 @@ static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
 static u64 __read_mostly shadow_mmio_mask;
+static u64 __read_mostly shadow_mmio_value;
 static u64 __read_mostly shadow_present_mask;
 
 /*
@@ -207,8 +208,10 @@ static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static void mmu_free_roots(struct kvm_vcpu *vcpu);
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
 {
+	BUG_ON((mmio_mask & mmio_value) != mmio_value);
+	shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
 	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
@@ -270,7 +273,7 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 	u64 mask = generation_mmio_spte_mask(gen);
 
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
-	mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;
+	mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
 
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
@@ -278,7 +281,7 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 
 static bool is_mmio_spte(u64 spte)
 {
-	return (spte & shadow_mmio_mask) == shadow_mmio_mask;
+	return (spte & shadow_mmio_mask) == shadow_mmio_value;
 }
 
 static gfn_t get_mmio_spte_gfn(u64 spte)