@@ -469,7 +469,6 @@ bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
 	return false;
 }
 
-
 #ifdef CONFIG_MMU
 /*
  * OOM Reaper kernel thread which tries to reap the memory used by the OOM
@@ -480,16 +479,54 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
 static struct task_struct *oom_reaper_list;
 static DEFINE_SPINLOCK(oom_reaper_lock);
 
-static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
+void __oom_reap_task_mm(struct mm_struct *mm)
 {
-	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
+
+	/*
+	 * Tell all users of get_user/copy_from_user etc... that the content
+	 * is no longer stable. No barriers really needed because unmapping
+	 * should imply barriers already and the reader would hit a page fault
+	 * if it stumbled over a reaped memory.
+	 */
+	set_bit(MMF_UNSTABLE, &mm->flags);
+
+	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
+		if (!can_madv_dontneed_vma(vma))
+			continue;
+
+		/*
+		 * Only anonymous pages have a good chance to be dropped
+		 * without additional steps which we cannot afford as we
+		 * are OOM already.
+		 *
+		 * We do not even care about fs backed pages because all
+		 * which are reclaimable have already been reclaimed and
+		 * we do not want to block exit_mmap by keeping mm ref
+		 * count elevated without a good reason.
+		 */
+		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+			const unsigned long start = vma->vm_start;
+			const unsigned long end = vma->vm_end;
+			struct mmu_gather tlb;
+
+			tlb_gather_mmu(&tlb, mm, start, end);
+			mmu_notifier_invalidate_range_start(mm, start, end);
+			unmap_page_range(&tlb, vma, start, end, NULL);
+			mmu_notifier_invalidate_range_end(mm, start, end);
+			tlb_finish_mmu(&tlb, start, end);
+		}
+	}
+}
+
+static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
+{
 	bool ret = true;
 
 	/*
 	 * We have to make sure to not race with the victim exit path
 	 * and cause premature new oom victim selection:
-	 * __oom_reap_task_mm		exit_mm
+	 * oom_reap_task_mm		exit_mm
 	 *   mmget_not_zero
 	 *				  mmput
 	 *				    atomic_dec_and_test
@@ -534,39 +571,8 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 
 	trace_start_task_reaping(tsk->pid);
 
-	/*
-	 * Tell all users of get_user/copy_from_user etc... that the content
-	 * is no longer stable. No barriers really needed because unmapping
-	 * should imply barriers already and the reader would hit a page fault
-	 * if it stumbled over a reaped memory.
-	 */
-	set_bit(MMF_UNSTABLE, &mm->flags);
-
-	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
-		if (!can_madv_dontneed_vma(vma))
-			continue;
+	__oom_reap_task_mm(mm);
 
-		/*
-		 * Only anonymous pages have a good chance to be dropped
-		 * without additional steps which we cannot afford as we
-		 * are OOM already.
-		 *
-		 * We do not even care about fs backed pages because all
-		 * which are reclaimable have already been reclaimed and
-		 * we do not want to block exit_mmap by keeping mm ref
-		 * count elevated without a good reason.
-		 */
-		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
-			const unsigned long start = vma->vm_start;
-			const unsigned long end = vma->vm_end;
-
-			tlb_gather_mmu(&tlb, mm, start, end);
-			mmu_notifier_invalidate_range_start(mm, start, end);
-			unmap_page_range(&tlb, vma, start, end, NULL);
-			mmu_notifier_invalidate_range_end(mm, start, end);
-			tlb_finish_mmu(&tlb, start, end);
-		}
-	}
 	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
 			task_pid_nr(tsk), tsk->comm,
 			K(get_mm_counter(mm, MM_ANONPAGES)),
@@ -587,14 +593,13 @@ static void oom_reap_task(struct task_struct *tsk)
 	struct mm_struct *mm = tsk->signal->oom_mm;
 
 	/* Retry the down_read_trylock(mmap_sem) a few times */
-	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
+	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
 		schedule_timeout_idle(HZ/10);
 
 	if (attempts <= MAX_OOM_REAP_RETRIES ||
 	    test_bit(MMF_OOM_SKIP, &mm->flags))
 		goto done;
 
-
 	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
 			task_pid_nr(tsk), tsk->comm);
 	debug_show_all_locks();
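
Usage note (not part of the patch above): __oom_reap_task_mm() becomes non-static, drops its bool return and its task argument, and the comment about not blocking exit_mmap suggests it is meant to be callable from an mm teardown path outside mm/oom_kill.c. The sketch below is a hedged illustration of such a caller, not code from this patch; it assumes a matching declaration of __oom_reap_task_mm() is exported in <linux/oom.h>, and the helper name reap_if_oom_victim() is made up for the example.

#include <linux/mm.h>
#include <linux/oom.h>			/* mm_is_oom_victim(); assumed home of __oom_reap_task_mm() */
#include <linux/sched/coredump.h>	/* MMF_OOM_SKIP */

/* Hypothetical helper; a real caller such as exit_mmap() would open-code this. */
static void reap_if_oom_victim(struct mm_struct *mm)
{
	if (!mm_is_oom_victim(mm))
		return;

	/* Drop the anonymous, non-shared mappings that are safe to reap. */
	__oom_reap_task_mm(mm);

	/* Let the oom killer move on without waiting for this mm. */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/*
	 * Synchronize with a racing oom_reaper that may still hold
	 * mmap_sem for read before the caller frees page tables and vmas.
	 */
	down_write(&mm->mmap_sem);
	up_write(&mm->mmap_sem);
}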