@@ -570,11 +570,14 @@ out:
 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
 {
+	struct userfaultfd_ctx *release_new_ctx;
+
 	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;
 
	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
+	release_new_ctx = NULL;
 
	spin_lock(&ctx->event_wqh.lock);
	/*
@@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
-
-				userfaultfd_ctx_put(new);
+				release_new_ctx = new;
			}
			break;
		}
@@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
	__set_current_state(TASK_RUNNING);
	spin_unlock(&ctx->event_wqh.lock);
 
+	if (release_new_ctx) {
+		struct vm_area_struct *vma;
+		struct mm_struct *mm = release_new_ctx->mm;
+
+		/* the various vma->vm_userfaultfd_ctx still points to it */
+		down_write(&mm->mmap_sem);
+		for (vma = mm->mmap; vma; vma = vma->vm_next)
+			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
+				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		up_write(&mm->mmap_sem);
+
+		userfaultfd_ctx_put(release_new_ctx);
+	}
+
	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.