@@ -2597,8 +2597,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		}
 	}
 
-	if (pte_access & ACC_WRITE_MASK)
+	if (pte_access & ACC_WRITE_MASK) {
 		mark_page_dirty(vcpu->kvm, gfn);
+		spte |= shadow_dirty_mask;
+	}
 
 set_pte:
 	if (mmu_spte_update(sptep, spte))
@@ -2914,6 +2916,18 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 */
 	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
 
+	/*
+	 * Theoretically we could also set the dirty bit (and flush the TLB)
+	 * here in order to eliminate unnecessary PML logging. See the
+	 * comments in set_spte. But fast_page_fault is very unlikely to
+	 * happen with PML enabled, so we do not do this. The same GPA might
+	 * therefore be logged in the PML buffer again when the write really
+	 * happens, and mark_page_dirty might eventually be called twice for
+	 * it, but that does no harm. Skipping the dirty bit here also avoids
+	 * the TLB flush it would need, so non-PML cases are not impacted.
+	 *
+	 * Compare with set_spte, where shadow_dirty_mask is set instead.
+	 */
 	if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
 		mark_page_dirty(vcpu->kvm, gfn);