@@ -380,15 +380,44 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
 	}
 }
 
-static int task_early_kill(struct task_struct *tsk, int force_early)
+/*
+ * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
+ * on behalf of the thread group. Return task_struct of the (first found)
+ * dedicated thread if found, and return NULL otherwise.
+ *
+ * We already hold read_lock(&tasklist_lock) in the caller, so we don't
+ * have to call rcu_read_lock/unlock() in this function.
+ */
+static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
+{
+	struct task_struct *t;
+
+	for_each_thread(tsk, t)
+		if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
+			return t;
+	return NULL;
+}
+
+/*
+ * Determine whether a given process is an "early kill" process which expects
+ * to be signaled when some page under the process is hwpoisoned.
+ * Return task_struct of the dedicated thread (main thread unless explicitly
+ * specified) if the process is "early kill," and otherwise return NULL.
+ */
+static struct task_struct *task_early_kill(struct task_struct *tsk,
+					   int force_early)
 {
+	struct task_struct *t;
 	if (!tsk->mm)
-		return 0;
+		return NULL;
 	if (force_early)
-		return 1;
-	if (tsk->flags & PF_MCE_PROCESS)
-		return !!(tsk->flags & PF_MCE_EARLY);
-	return sysctl_memory_failure_early_kill;
+		return tsk;
+	t = find_early_kill_thread(tsk);
+	if (t)
+		return t;
+	if (sysctl_memory_failure_early_kill)
+		return tsk;
+	return NULL;
 }
 
 /*
@@ -410,16 +439,17 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	read_lock(&tasklist_lock);
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
+		struct task_struct *t = task_early_kill(tsk, force_early);
 
-		if (!task_early_kill(tsk, force_early))
+		if (!t)
 			continue;
 		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
 					       pgoff, pgoff) {
 			vma = vmac->vma;
 			if (!page_mapped_in_vma(page, vma))
 				continue;
-			if (vma->vm_mm == tsk->mm)
-				add_to_kill(tsk, page, vma, to_kill, tkc);
+			if (vma->vm_mm == t->mm)
+				add_to_kill(t, page, vma, to_kill, tkc);
 		}
 	}
 	read_unlock(&tasklist_lock);
@@ -440,10 +470,10 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		struct task_struct *t = task_early_kill(tsk, force_early);
 
-		if (!task_early_kill(tsk, force_early))
+		if (!t)
 			continue;
-
 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
 				      pgoff) {
 			/*
@@ -453,8 +483,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 			 * Assume applications who requested early kill want
 			 * to be informed of all such data corruptions.
 			 */
-			if (vma->vm_mm == tsk->mm)
-				add_to_kill(tsk, page, vma, to_kill, tkc);
+			if (vma->vm_mm == t->mm)
+				add_to_kill(t, page, vma, to_kill, tkc);
 		}
 	}
 	read_unlock(&tasklist_lock);
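
For context, not part of the patch: userspace opts a thread into this
dedicated-handler role with prctl(PR_MCE_KILL, PR_MCE_KILL_SET,
PR_MCE_KILL_EARLY, 0, 0), which is what sets the PF_MCE_PROCESS and
PF_MCE_EARLY flags that find_early_kill_thread() tests. A minimal sketch
follows; it assumes glibc headers that expose PR_MCE_KILL (the BUS_MCEERR_AO
fallback #define guards against older headers), and the thread and handler
names are illustrative only. Build with -lpthread.

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO	5	/* action-optional hwpoison si_code */
#endif

/* Async-signal-safe SIGBUS handler: just note that an AO event arrived. */
static void sigbus_handler(int sig, siginfo_t *si, void *ucontext)
{
	if (si->si_code == BUS_MCEERR_AO) {
		static const char msg[] = "SIGBUS(BUS_MCEERR_AO) received\n";
		write(STDERR_FILENO, msg, sizeof(msg) - 1);
	}
}

/* The dedicated thread: flag itself as early-kill and wait for signals. */
static void *mce_handler_thread(void *arg)
{
	struct sigaction sa = {
		.sa_sigaction	= sigbus_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGBUS, &sa, NULL);
	/*
	 * Sets PF_MCE_PROCESS|PF_MCE_EARLY on this thread only, so
	 * find_early_kill_thread() picks it for the whole thread group.
	 */
	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
	for (;;)
		pause();
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, mce_handler_thread, NULL);
	/* ... application work on the other threads ... */
	pthread_join(tid, NULL);
	return 0;
}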