@@ -170,81 +170,18 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 }
 
 /*
- * Returns the mm_struct corresponding to the context ctx via ctx->pid
- * In case the task has exited we use the task group leader accessible
- * via ctx->glpid to find the next task in the thread group that has a
- * valid mm_struct associated with it. If a task with valid mm_struct
- * is found the ctx->pid is updated to use the task struct for subsequent
- * translations. In case no valid mm_struct is found in the task group to
- * service the fault a NULL is returned.
+ * Returns the mm_struct corresponding to the context ctx.
+ * If mm_users == 0, the context may be in the process of being closed.
  */
 static struct mm_struct *get_mem_context(struct cxl_context *ctx)
 {
-	struct task_struct *task = NULL;
-	struct mm_struct *mm = NULL;
-	struct pid *old_pid = ctx->pid;
-
-	if (old_pid == NULL) {
-		pr_warn("%s: Invalid context for pe=%d\n",
-			__func__, ctx->pe);
+	if (ctx->mm == NULL)
 		return NULL;
-	}
-
-	task = get_pid_task(old_pid, PIDTYPE_PID);
-
-	/*
-	 * pid_alive may look racy but this saves us from costly
-	 * get_task_mm when the task is a zombie. In worst case
-	 * we may think a task is alive, which is about to die
-	 * but get_task_mm will return NULL.
-	 */
-	if (task != NULL && pid_alive(task))
-		mm = get_task_mm(task);
-
-	/* release the task struct that was taken earlier */
-	if (task)
-		put_task_struct(task);
-	else
-		pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
-			__func__, pid_nr(old_pid), ctx->pe);
-
-	/*
-	 * If we couldn't find the mm context then use the group
-	 * leader to iterate over the task group and find a task
-	 * that gives us mm_struct.
-	 */
-	if (unlikely(mm == NULL && ctx->glpid != NULL)) {
-
-		rcu_read_lock();
-		task = pid_task(ctx->glpid, PIDTYPE_PID);
-		if (task)
-			do {
-				mm = get_task_mm(task);
-				if (mm) {
-					ctx->pid = get_task_pid(task,
-								PIDTYPE_PID);
-					break;
-				}
-				task = next_thread(task);
-			} while (task && !thread_group_leader(task));
-		rcu_read_unlock();
-
-		/* check if we switched pid */
-		if (ctx->pid != old_pid) {
-			if (mm)
-				pr_devel("%s:pe=%i switch pid %i->%i\n",
-					 __func__, ctx->pe, pid_nr(old_pid),
-					 pid_nr(ctx->pid));
-			else
-				pr_devel("%s:Cannot find mm for pid=%i\n",
-					 __func__, pid_nr(old_pid));
-
-			/* drop the reference to older pid */
-			put_pid(old_pid);
-		}
-	}
+
+	if (!atomic_inc_not_zero(&ctx->mm->mm_users))
+		return NULL;
 
-	return mm;
+	return ctx->mm;
 }
 
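The new implementation relies on the try-get idiom: atomic_inc_not_zero()
takes a reference on mm_users only if the count is still non-zero, so a
context whose address space is already being torn down is skipped rather
than revived. Below is a minimal userspace sketch of the same idiom using
C11 atomics; the struct and try_get() helper are hypothetical, for
illustration only, and not cxl or kernel API:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct obj {
		atomic_int users;	/* plays the role of mm->mm_users */
	};

	/* Take a reference only while the object is live. Like
	 * atomic_inc_not_zero(), this never moves the count 0 -> 1, so a
	 * dying object cannot be resurrected by a racing reader. */
	static bool try_get(struct obj *o)
	{
		int old = atomic_load(&o->users);

		while (old != 0) {
			/* on failure, 'old' is refreshed with the current count */
			if (atomic_compare_exchange_weak(&o->users, &old, old + 1))
				return true;	/* reference taken */
		}
		return false;	/* count already hit zero; object is going away */
	}

As a side note, more recent kernels provide mmget_not_zero() as a wrapper
for exactly this operation on mm_users.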
@@ -282,7 +219,6 @@ void cxl_handle_fault(struct work_struct *fault_work)
 	if (!ctx->kernel) {
 
 		mm = get_mem_context(ctx);
-		/* indicates all the thread in task group have exited */
 		if (mm == NULL) {
 			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
 				 __func__, ctx->pe, pid_nr(ctx->pid));