@@ -380,10 +380,12 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
 	}
 }
 
-static int task_early_kill(struct task_struct *tsk)
+static int task_early_kill(struct task_struct *tsk, int force_early)
 {
 	if (!tsk->mm)
 		return 0;
+	if (force_early)
+		return 1;
 	if (tsk->flags & PF_MCE_PROCESS)
 		return !!(tsk->flags & PF_MCE_EARLY);
 	return sysctl_memory_failure_early_kill;
@@ -393,7 +395,7 @@ static int task_early_kill(struct task_struct *tsk)
  * Collect processes when the error hit an anonymous page.
  */
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
-			      struct to_kill **tkc)
+			      struct to_kill **tkc, int force_early)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
@@ -409,7 +411,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
 
-		if (!task_early_kill(tsk))
+		if (!task_early_kill(tsk, force_early))
 			continue;
 		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
 					       pgoff, pgoff) {
@@ -428,7 +430,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
  * Collect processes when the error hit a file mapped page.
  */
 static void collect_procs_file(struct page *page, struct list_head *to_kill,
-			      struct to_kill **tkc)
+			      struct to_kill **tkc, int force_early)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
@@ -439,7 +441,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	for_each_process(tsk) {
 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
-		if (!task_early_kill(tsk))
+		if (!task_early_kill(tsk, force_early))
 			continue;
 
 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
@@ -465,7 +467,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
  * First preallocate one tokill structure outside the spin locks,
  * so that we can kill at least one process reasonably reliable.
  */
-static void collect_procs(struct page *page, struct list_head *tokill)
+static void collect_procs(struct page *page, struct list_head *tokill,
+				int force_early)
 {
 	struct to_kill *tk;
 
@@ -476,9 +479,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
 	if (!tk)
 		return;
 	if (PageAnon(page))
-		collect_procs_anon(page, tokill, &tk);
+		collect_procs_anon(page, tokill, &tk, force_early);
 	else
-		collect_procs_file(page, tokill, &tk);
+		collect_procs_file(page, tokill, &tk, force_early);
 	kfree(tk);
 }
 
@@ -963,7 +966,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
	 * there's nothing that can be done.
	 */
 	if (kill)
-		collect_procs(ppage, &tokill);
+		collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
 
 	ret = try_to_unmap(ppage, ttu);
 	if (ret != SWAP_SUCCESS)
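
Note on the resulting control flow (not part of the patch): task_early_kill()
now decides in three steps. An unconditional early kill when force_early is
set (the caller passes flags & MF_ACTION_REQUIRED), then any per-process
policy the task set via prctl(PR_MCE_KILL), and finally the system-wide
vm.memory_failure_early_kill sysctl. As a minimal userspace sketch of the
per-process opt-in that force_early bypasses, assuming only the standard
PR_MCE_KILL prctl interface:

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_MCE_KILL
	#define PR_MCE_KILL		33	/* memory-failure kill policy */
	#define PR_MCE_KILL_SET		1
	#define PR_MCE_KILL_EARLY	1
	#endif

	int main(void)
	{
		/*
		 * Sets PF_MCE_PROCESS and PF_MCE_EARLY on the calling task,
		 * which task_early_kill() consults when force_early == 0.
		 */
		if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
			perror("prctl(PR_MCE_KILL)");
		return 0;
	}

With this patch applied, an action-required failure kills the mapping
processes early regardless of the prctl()/sysctl settings above, since
force_early short-circuits both checks.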