@@ -851,6 +851,17 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
 	return ret;
 }
 
+static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
+					  bool no_dirty_log)
+{
+	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
+		return false;
+	if (no_dirty_log && slot->dirty_bitmap)
+		return false;
+
+	return true;
+}
+
 static struct kvm_memory_slot *
 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 			    bool no_dirty_log)
@@ -858,25 +869,22 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 	struct kvm_memory_slot *slot;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
-	      (no_dirty_log && slot->dirty_bitmap))
+	if (!memslot_valid_for_gpte(slot, no_dirty_log))
 		slot = NULL;
 
 	return slot;
 }
 
-static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
-{
-	return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
-}
-
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
 			 bool *force_pt_level)
 {
 	int host_level, level, max_level;
+	struct kvm_memory_slot *slot;
+
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
 
 	if (likely(!*force_pt_level))
-		*force_pt_level = mapping_level_dirty_bitmap(vcpu, large_gfn);
+		*force_pt_level = !memslot_valid_for_gpte(slot, true);
 	if (unlikely(*force_pt_level))
 		return PT_PAGE_TABLE_LEVEL;
 