/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 * Copyright (C) 2010 Google, Inc.
 *	Rewritten by David Rientjes
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent over-eager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim().
 */
DEFINE_MUTEX(oom_lock);
#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant. Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */
/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer. Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();
	return t;
}

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When mem_cgroup_out_of_memory() and p is not member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}
/*
 * Print out unreclaimable slab info when the amount of unreclaimable slab
 * memory is greater than all user memory (LRU pages).
 */
static bool is_dump_unreclaim_slabs(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
}
/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @memcg: task's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible. The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have already been oom reaped or are in the
	 * middle of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
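/*
 * Worked example of the calculation above (the numbers are illustrative,
 * not taken from any real system): with totalpages = 4,000,000 pages
 * (roughly 16GB of RAM plus swap at 4KB pages), one oom_score_adj unit is
 * worth totalpages / 1000 = 4,000 pages. A task whose rss + swap entries +
 * page tables add up to 1,000,000 pages then scores:
 *
 *	oom_score_adj    0:	1,000,000 + 0         = 1,000,000 points
 *	oom_score_adj  300:	1,000,000 + 1,200,000 = 2,200,000 points
 *	oom_score_adj -500:	1,000,000 - 2,000,000 -> clamped to 1 point
 *
 * A positive adjustment makes a task look bigger to the oom killer, a
 * negative one makes it look smaller, and an eligible task never scores 0.
 */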
enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We only reach here for __GFP_THISNODE allocations when __GFP_NOFAIL
	 * is also used, so we should avoid killing current: we would have to
	 * pick a task at random in this case. Ideally this would be
	 * CONSTRAINT_THISNODE, but there is no way to handle that for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect. Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP, because the chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;

	/* Prefer thread group leaders for display purposes */
	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. If the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}
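/*
 * Note: the final division in select_bad_process() rescales the raw badness
 * value (which can be on the order of totalpages) down to a 0..1000 range,
 * i.e. roughly "per mille of totalpages". This is the value printed as
 * "score %u" by oom_kill_process() below and is comparable to what
 * /proc/<pid>/oom_score reports: a task using about half of totalpages ends
 * up with a score around 500.
 */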
/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks. Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's. There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			mm_pgtables_bytes(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask,
		nodemask_pr_args(oc->nodemask), oc->order,
		current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_info(oc->memcg, p);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (is_dump_unreclaim_slabs())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}
/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * task->mm can be NULL if the task is the exited group leader. So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}
#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (!can_madv_dontneed_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			const unsigned long start = vma->vm_start;
			const unsigned long end = vma->vm_end;
			struct mmu_gather tlb;

			tlb_gather_mmu(&tlb, mm, start, end);
			if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) {
				tlb_finish_mmu(&tlb, start, end);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, start, end, NULL);
			mmu_notifier_invalidate_range_end(mm, start, end);
			tlb_finish_mmu(&tlb, start, end);
		}
	}

	return ret;
}
/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!down_read_trylock(&mm->mmap_sem)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_sem for reading because it serializes against the
	 * down_write();up_write() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* If we failed to reap part of the address space, try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	up_read(&mm->mmap_sem);

	return ret;
}
#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from OOM killer because it has been either reaped or
	 * somebody can't call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */
/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
 * (either under task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer wouldn't be able to free
	 * any memory and would livelock. freezing_slow_path will tell the
	 * freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new use of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
static void __oom_kill_process(struct task_struct *victim)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any. They don't get access to memory reserves, though, to avoid
	 * depletion of all memory. This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from the userspace so we are
		 * ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Kill provided task unless it's secured by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *unused)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(task);
		__oom_kill_process(task);
	}
	return 0;
}
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *p = oc->chosen;
	unsigned int points = oc->chosen_points;
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mem_cgroup *oom_group;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent. This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
				oc->memcg, oc->nodemask, oc->totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member, NULL);
		mem_cgroup_put(oom_group);
	}
}
/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
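/*
 * Usage sketch for the notifier chain above (illustrative only; my_oom_notify
 * and my_cache_shrink() are made-up names, not code from this file): a
 * subsystem that can release memory right before a victim is chosen registers
 * a callback that adds the number of pages it freed to the counter passed in
 * by out_of_memory():
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_cache_shrink();	/+ pages released, hypothetical helper +/
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 *
 * If the callbacks report any freed pages, out_of_memory() below returns
 * without killing anything.
 */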
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	constraint = constrained_alloc(oc);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}
/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}