|
@@ -1111,3 +1111,49 @@ out:
|
|
|
free_hyp_pgds();
|
|
|
return err;
|
|
|
}
|
|
|
+
|
|
|
+void kvm_arch_commit_memory_region(struct kvm *kvm,
|
|
|
+ struct kvm_userspace_memory_region *mem,
|
|
|
+ const struct kvm_memory_slot *old,
|
|
|
+ enum kvm_mr_change change)
|
|
|
+{
|
|
|
+ gpa_t gpa = old->base_gfn << PAGE_SHIFT;
|
|
|
+ phys_addr_t size = old->npages << PAGE_SHIFT;
|
|
|
+ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
|
|
|
+ spin_lock(&kvm->mmu_lock);
|
|
|
+ unmap_stage2_range(kvm, gpa, size);
|
|
|
+ spin_unlock(&kvm->mmu_lock);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
/*
 * kvm_arch_prepare_memory_region - validate a memslot change before it
 * is committed.
 *
 * No arch-specific validation or preparation is performed here; every
 * change is accepted unconditionally (returns 0).  Any required teardown
 * happens later in kvm_arch_commit_memory_region().
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}
|
|
|
+
|
|
|
/*
 * kvm_arch_free_memslot - release arch-private per-memslot state.
 *
 * Intentionally empty: kvm_arch_create_memslot() allocates nothing for
 * this architecture, so there is nothing to free here.
 */
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}
|
|
|
+
|
|
|
/*
 * kvm_arch_create_memslot - allocate arch-private per-memslot state.
 *
 * This architecture keeps no per-memslot metadata, so no allocation is
 * needed; always succeed.  (Its counterpart, kvm_arch_free_memslot(),
 * is correspondingly empty.)
 */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
|
|
|
+
|
|
|
/*
 * kvm_arch_memslots_updated - hook invoked after the memslot array has
 * been updated.  No arch-specific bookkeeping is required here.
 */
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
|
|
|
+
|
|
|
/*
 * kvm_arch_flush_shadow_all - hook to drop all stage-2/shadow mappings.
 *
 * Intentionally empty here; presumably full stage-2 teardown is handled
 * elsewhere on this architecture (e.g. at VM destruction) — confirm
 * against the rest of the file.
 */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
|
|
|
+
|
|
|
/*
 * kvm_arch_flush_shadow_memslot - hook to drop mappings for one slot.
 *
 * Intentionally empty: the unmap for deleted/moved slots is performed in
 * kvm_arch_commit_memory_region() instead (see unmap_stage2_range there).
 */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
|