@@ -101,7 +101,7 @@ static void async_pf_execute(struct work_struct *work)
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
 
-	mmdrop(mm);
+	mmput(mm);
 	kvm_put_kvm(vcpu->kvm);
 }
@@ -118,7 +118,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 		flush_work(&work->work);
 #else
 		if (cancel_work_sync(&work->work)) {
-			mmdrop(work->mm);
+			mmput(work->mm);
 			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
 			kmem_cache_free(async_pf_cache, work);
 		}
@@ -183,7 +183,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
-	atomic_inc(&work->mm->mm_count);
+	atomic_inc(&work->mm->mm_users);
 	kvm_get_kvm(work->vcpu->kvm);
 
 	/* this can't really happen otherwise gfn_to_pfn_async
@@ -201,7 +201,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	return 1;
 retry_sync:
 	kvm_put_kvm(work->vcpu->kvm);
-	mmdrop(work->mm);
+	mmput(work->mm);
 	kmem_cache_free(async_pf_cache, work);
 	return 0;
 }