@@ -28,6 +28,21 @@
 #include "async_pf.h"
 #include <trace/events/kvm.h>
 
+static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
+					       struct kvm_async_pf *work)
+{
+#ifdef CONFIG_KVM_ASYNC_PF_SYNC
+	kvm_arch_async_page_present(vcpu, work);
+#endif
+}
+static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
+						struct kvm_async_pf *work)
+{
+#ifndef CONFIG_KVM_ASYNC_PF_SYNC
+	kvm_arch_async_page_present(vcpu, work);
+#endif
+}
+
 static struct kmem_cache *async_pf_cache;
 
 int kvm_async_pf_init(void)
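The two helpers above turn CONFIG_KVM_ASYNC_PF_SYNC into a compile-time
dispatch: exactly one of them forwards to kvm_arch_async_page_present(),
and the other compiles to a no-op. A minimal userspace sketch of that
pattern follows; the stub types, printf body, and main() harness are
hypothetical scaffolding for illustration, not part of the patch:

#include <stdio.h>

struct kvm_vcpu { int id; };			/* stub */
struct kvm_async_pf { unsigned long addr; };	/* stub */

static void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
					struct kvm_async_pf *work)
{
	printf("vcpu %d: page at %#lx is present\n", vcpu->id, work->addr);
}

static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
					       struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);	/* from the worker */
#endif
}

static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
						struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);	/* at completion time */
#endif
}

int main(void)
{
	struct kvm_vcpu vcpu = { .id = 0 };
	struct kvm_async_pf work = { .addr = 0x1000 };

	kvm_async_page_present_sync(&vcpu, &work);
	kvm_async_page_present_async(&vcpu, &work);
	return 0;	/* exactly one notification fires either way */
}

Built with -DCONFIG_KVM_ASYNC_PF_SYNC the notification comes from the
sync path; without it, from the async path. In either configuration the
arch callback runs exactly once per fault.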
@@ -69,6 +84,7 @@ static void async_pf_execute(struct work_struct *work)
 	down_read(&mm->mmap_sem);
 	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
+	kvm_async_page_present_sync(vcpu, apf);
 	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
@@ -138,7 +154,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 		       struct kvm_arch_async_pf *arch)
 {
 	struct kvm_async_pf *work;
@@ -159,7 +175,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 	work->wakeup_all = false;
 	work->vcpu = vcpu;
 	work->gva = gva;
-	work->addr = gfn_to_hva(vcpu->kvm, gfn);
+	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
 	atomic_inc(&work->mm->mm_count);
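The last two hunks change kvm_setup_async_pf() to take the host virtual
address directly, moving the gfn_to_hva() lookup out to the callers, so
an architecture that only has an hva at hand no longer needs to supply a
gfn. A hedged sketch of what a caller might look like after this change;
the arch_setup_async_pf() wrapper and the empty initializer are
hypothetical, only the kvm_setup_async_pf() and gfn_to_hva() signatures
come from the source:

/* Hypothetical arch-side call site: the caller now resolves
 * gfn -> hva itself before handing the work off. */
static int arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
{
	struct kvm_arch_async_pf arch = { };	/* arch-specific payload */

	return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch);
}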