@@ -811,8 +811,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 	int i;
 
 	slot = gfn_to_memslot(kvm, gfn);
-	for (i = PT_DIRECTORY_LEVEL;
-	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
 		linfo->write_count += 1;
 	}
@@ -826,8 +825,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 	int i;
 
 	slot = gfn_to_memslot(kvm, gfn);
-	for (i = PT_DIRECTORY_LEVEL;
-	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
 		linfo->write_count -= 1;
 		WARN_ON(linfo->write_count < 0);
@@ -858,8 +856,7 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
 
 	page_size = kvm_host_page_size(kvm, gfn);
 
-	for (i = PT_PAGE_TABLE_LEVEL;
-	     i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
+	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		if (page_size >= KVM_HPAGE_SIZE(i))
 			ret = i;
 		else
@@ -1344,8 +1341,7 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
 
 	slot = gfn_to_memslot(kvm, gfn);
 
-	for (i = PT_PAGE_TABLE_LEVEL;
-	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmapp = __gfn_to_rmap(gfn, i, slot);
 		write_protected |= __rmap_write_protect(kvm, rmapp, true);
 	}
@@ -1451,7 +1447,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
 
 		for (j = PT_PAGE_TABLE_LEVEL;
-		     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
+		     j <= PT_MAX_HUGEPAGE_LEVEL; ++j) {
 			unsigned long idx, idx_end;
 			unsigned long *rmapp;
 			gfn_t gfn = gfn_start;
@@ -4416,8 +4412,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 
 	spin_lock(&kvm->mmu_lock);
 
-	for (i = PT_PAGE_TABLE_LEVEL;
-	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		unsigned long *rmapp;
 		unsigned long last_index, index;
 
@@ -4573,8 +4568,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
 
 	spin_lock(&kvm->mmu_lock);
 
-	for (i = PT_PAGE_TABLE_LEVEL + 1; /* skip rmap for 4K page */
-	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+	/* skip rmap for 4K page */
+	for (i = PT_PAGE_TABLE_LEVEL + 1; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		unsigned long *rmapp;
 		unsigned long last_index, index;
 
@@ -4611,8 +4606,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 
 	spin_lock(&kvm->mmu_lock);
 
-	for (i = PT_PAGE_TABLE_LEVEL;
-	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		unsigned long *rmapp;
 		unsigned long last_index, index;
 
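
For reference: the conversion swaps an exclusive bound (i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES) for an inclusive one (i <= PT_MAX_HUGEPAGE_LEVEL), so the loops only walk the same levels if the new macro equals the old bound minus one. A minimal sketch of the definition the converted loops rely on, shown next to the existing level constants in arch/x86/kvm/mmu.h (the exact placement and the surrounding constants are assumptions, not part of these hunks):

    /* Existing x86 paging-level constants (4K, 2M, 1G). */
    #define PT_PAGE_TABLE_LEVEL	1
    #define PT_DIRECTORY_LEVEL	2
    #define PT_PDPE_LEVEL		3

    /*
     * Assumed definition: the highest hugepage level is the 4K level plus the
     * number of additional page sizes, i.e. the old exclusive bound minus one.
     * With KVM_NR_PAGE_SIZES == 3 this evaluates to PT_PDPE_LEVEL (1G pages).
     */
    #define PT_MAX_HUGEPAGE_LEVEL	(PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)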