@@ -4454,6 +4454,75 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 	init_kvm_mmu(vcpu);
 }
 
+/* The return value indicates if tlb flush on all vcpus is needed. */
+typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap);
+
+/* The caller should hold mmu-lock before calling this function. */
+static bool
+slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			slot_level_handler fn, int start_level, int end_level,
+			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
+{
+	struct slot_rmap_walk_iterator iterator;
+	bool flush = false;
+
+	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
+			end_gfn, &iterator) {
+		if (iterator.rmap)
+			flush |= fn(kvm, iterator.rmap);
+
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+			if (flush && lock_flush_tlb) {
+				kvm_flush_remote_tlbs(kvm);
+				flush = false;
+			}
+			cond_resched_lock(&kvm->mmu_lock);
+		}
+	}
+
+	if (flush && lock_flush_tlb) {
+		kvm_flush_remote_tlbs(kvm);
+		flush = false;
+	}
+
+	return flush;
+}
+
+static bool
+slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+		  slot_level_handler fn, int start_level, int end_level,
+		  bool lock_flush_tlb)
+{
+	return slot_handle_level_range(kvm, memslot, fn, start_level,
+			end_level, memslot->base_gfn,
+			memslot->base_gfn + memslot->npages - 1,
+			lock_flush_tlb);
+}
+
+static bool
+slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+		      slot_level_handler fn, bool lock_flush_tlb)
+{
+	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
+				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+}
+
+static bool
+slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			slot_level_handler fn, bool lock_flush_tlb)
+{
+	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
+				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+}
+
+static bool
+slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
+		 slot_level_handler fn, bool lock_flush_tlb)
+{
+	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
+				 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
+}
+
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 				      struct kvm_memory_slot *memslot)
 {
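
A short illustration of the new API may help: a slot_level_handler operates on one rmap head and returns whether TLBs must be flushed, and the wrappers only pick the level range (slot_handle_leaf walks 4K sptes only, slot_handle_large_level walks 2M and above, slot_handle_all_level walks every level). The sketch below is not part of this patch; it assumes the existing __rmap_write_protect() helper in arch/x86/kvm/mmu.c, and the names slot_rmap_write_protect() and example_write_protect_slot() are hypothetical.

	/*
	 * Illustrative sketch only, not part of this patch: a minimal
	 * slot_level_handler plus a caller driving it over a memslot.
	 */
	static bool slot_rmap_write_protect(struct kvm *kvm,
					    unsigned long *rmapp)
	{
		/* true when an spte was changed, i.e. TLBs need flushing */
		return __rmap_write_protect(kvm, rmapp, false);
	}

	static void example_write_protect_slot(struct kvm *kvm,
					       struct kvm_memory_slot *memslot)
	{
		bool flush;

		/* the rmap walk requires mmu-lock, per the comment above */
		spin_lock(&kvm->mmu_lock);
		flush = slot_handle_all_level(kvm, memslot,
					      slot_rmap_write_protect, false);
		spin_unlock(&kvm->mmu_lock);

		/* lock_flush_tlb was false, so the caller flushes once here */
		if (flush)
			kvm_flush_remote_tlbs(kvm);
	}

Passing lock_flush_tlb as false makes slot_handle_level_range() skip the remote flush when it drops mmu-lock in cond_resched_lock(), leaving a single flush to the caller; whether that deferral is safe depends on what the handler tears down, which is why the range helper offers both behaviors.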
|