@@ -1417,18 +1417,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	struct rmap_iterator uninitialized_var(iter);
 	int young = 0;
 
-	/*
-	 * In case of absence of EPT Access and Dirty Bits supports,
-	 * emulate the accessed bit for EPT, by checking if this page has
-	 * an EPT mapping, and clearing it if it does. On the next access,
-	 * a new EPT mapping will be established.
-	 * This has some overhead, but not as much as the cost of swapping
-	 * out actively used pages or breaking up actively used hugepages.
-	 */
-	if (!shadow_accessed_mask) {
-		young = kvm_unmap_rmapp(kvm, rmapp, slot, gfn, level, data);
-		goto out;
-	}
+	BUG_ON(!shadow_accessed_mask);
 
 	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
 	     sptep = rmap_get_next(&iter)) {
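With the fallback gone, kvm_age_rmapp now relies on a real hardware accessed bit; the emulation path moves into kvm_age_hva in the third hunk below. For reference, the aging loop that the next hunk's context lines belong to test-and-clears the accessed bit on each sPTE. A minimal sketch of that loop, reconstructed from kernel sources of this vintage and not part of the patch (the BUG_ON on is_shadow_present_pte is an assumption about the surrounding code):

	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
	     sptep = rmap_get_next(&iter)) {
		BUG_ON(!is_shadow_present_pte(*sptep));

		if (*sptep & shadow_accessed_mask) {
			young = 1;
			/* atomically clear the hardware accessed bit */
			clear_bit((ffs(shadow_accessed_mask) - 1),
				  (unsigned long *)sptep);
		}
	}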
@@ -1440,7 +1429,6 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 				  (unsigned long *)sptep);
 		}
 	}
-out:
 	trace_kvm_age_page(gfn, level, slot, young);
 	return young;
 }
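The third hunk relocates the EPT accessed-bit emulation into kvm_age_hva and bumps kvm->mmu_notifier_seq before blowing the sPTEs away via kvm_unmap_rmapp. The bump matters because the page fault path samples the counter before installing a new sPTE and retries if it moved; without it, a concurrent fault could re-install a mapping for a page the aging pass just unmapped. A hedged sketch of that consumer side (simplified fault path, helper names taken from kernels of this vintage, not part of the patch):

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	/* ... translate gfn to pfn, possibly sleeping ... */

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		/* seq moved: an invalidation ran, retry the fault */
		goto out_unlock;
	/* ... safe to install the new sPTE ... */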
@@ -1489,9 +1477,29 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
+	/*
+	 * In case of absence of EPT Access and Dirty Bits supports,
+	 * emulate the accessed bit for EPT, by checking if this page has
+	 * an EPT mapping, and clearing it if it does. On the next access,
+	 * a new EPT mapping will be established.
+	 * This has some overhead, but not as much as the cost of swapping
+	 * out actively used pages or breaking up actively used hugepages.
+	 */
+	if (!shadow_accessed_mask) {
+		/*
+		 * We are holding the kvm->mmu_lock, and we are blowing up
+		 * shadow PTEs. MMU notifier consumers need to be kept at bay.
+		 * This is correct as long as we don't decouple the mmu_lock
+		 * protected regions (like invalidate_range_start|end does).
+		 */
+		kvm->mmu_notifier_seq++;
+		return kvm_handle_hva_range(kvm, start, end, 0,
+					    kvm_unmap_rmapp);
+	}
+
+	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
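The inner comment's claim that mmu_lock is held refers to the MMU notifier wrapper that invokes kvm_age_hva. A sketch of that caller with the new start/end signature, reconstructed from virt/kvm/kvm_main.c of this era for illustration rather than quoted from the patch:

	static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
						      struct mm_struct *mm,
						      unsigned long start,
						      unsigned long end)
	{
		struct kvm *kvm = mmu_notifier_to_kvm(mn);
		int young, idx;

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);	/* held across kvm_age_hva() */

		young = kvm_age_hva(kvm, start, end);
		if (young)
			kvm_flush_remote_tlbs(kvm);

		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		return young;
	}

Because the lock is taken and released in one critical section here, the seq bump in kvm_age_hva stays ordered with the sPTE teardown, which is exactly the coupling the inner comment says must not be broken.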