
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

static void exit_mm(struct task_struct *tsk);
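
/*
 * Drop the task from the pid hashes and the thread lists.  The caller
 * (__exit_signal) holds both tasklist_lock (write-locked) and the
 * task's ->siglock.
 */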
static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *uninitialized_var(tty);
	cputime_t utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (group_dead) {
		posix_cpu_timers_exit_group(tsk);
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * This can only happen if the caller is de_thread().
		 * FIXME: this is the temporary hack, we should teach
		 * posix-cpu-timers to handle this case correctly.
		 */
		if (unlikely(has_group_leader_pid(tsk)))
			posix_cpu_timers_exit_group(tsk);

		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	/*
	 * Accumulate here the counters for all threads but the group leader
	 * as they die, so they can be added into the process-wide totals
	 * when those are taken.  The group leader stays around as a zombie as
	 * long as there are other threads.  When it gets reaped, the exit.c
	 * code will add its counts into these totals.  We won't ever get here
	 * for the group leader, since it will have been the last reference on
	 * the signal_struct.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}
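
/*
 * RCU callback: drop the task_struct reference only after a grace
 * period, so lock-free walkers of the thread lists never see a freed
 * task.
 */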
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}
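
/*
 * Final reaping of a dead task: drop its accounting, unhash it from
 * the pid tables, and schedule the task_struct for RCU-delayed
 * freeing.  May loop to also reap a zombie group leader that this
 * task was keeping alive.
 */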
void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	proc_flush_task(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.  If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		mm->owner = NULL;
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;

		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			freezable_schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
	clear_thread_flag(TIF_MEMDIE);
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *thread;

	thread = father;
	while_each_thread(father, thread) {
		if (thread->flags & PF_EXITING)
			continue;
		if (unlikely(pid_ns->child_reaper == father))
			pid_ns->child_reaper = thread;
		return thread;
	}

	if (unlikely(pid_ns->child_reaper == father)) {
		write_unlock_irq(&tasklist_lock);
		if (unlikely(pid_ns == &init_pid_ns)) {
			panic("Attempted to kill init! exitcode=0x%08x\n",
				father->signal->group_exit_code ?:
					father->exit_code);
		}

		zap_pid_ns_processes(pid_ns);
		write_lock_irq(&tasklist_lock);
	} else if (father->signal->has_child_subreaper) {
		struct task_struct *reaper;

		/*
		 * Find the first ancestor marked as child_subreaper.
		 * Note that the code below checks same_thread_group(reaper,
		 * pid_ns->child_reaper).  This is what we need to DTRT in a
		 * PID namespace. However we still need the check above, see
		 * http://marc.info/?l=linux-kernel&m=131385460420380
		 */
		for (reaper = father->real_parent;
		     reaper != &init_task;
		     reaper = reaper->real_parent) {
			if (same_thread_group(reaper, pid_ns->child_reaper))
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = reaper;
			do {
				if (!(thread->flags & PF_EXITING))
					return reaper;
			} while_each_thread(reaper, thread);
		}
	}

	return pid_ns->child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	list_move_tail(&p->sibling, &p->real_parent->children);

	if (p->exit_state == EXIT_DEAD)
		return;
	/*
	 * If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_move_tail(&p->sibling, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper;
	LIST_HEAD(dead_children);

	write_lock_irq(&tasklist_lock);
	/*
	 * Note that exit_ptrace() and find_new_reaper() might
	 * drop tasklist_lock and reacquire it.
	 */
	exit_ptrace(father);
	reaper = find_new_reaper(father);

	list_for_each_entry_safe(p, n, &father->children, sibling) {
		struct task_struct *t = p;

		do {
			t->real_parent = reaper;
			if (t->parent == father) {
				BUG_ON(t->ptrace);
				t->parent = t->real_parent;
			}
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t);
		} while_each_thread(p, t);
		reparent_leader(father, p, &dead_children);
	}
	write_unlock_irq(&tasklist_lock);

	BUG_ON(!list_empty(&father->children));

	list_for_each_entry_safe(p, n, &dead_children, sibling) {
		list_del_init(&p->sibling);
		release_task(p);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);

	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	/* If the process is dead, release it - nobody will wait for it */
	if (autoreap)
		release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_warn("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;
	TASKS_RCU(int tasks_rcu_i);

	profile_task_exit(tsk);

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	/*
	 * If do_exit is called because this process oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	set_fs(USER_DS);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */
	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());

	acct_update_integrals(tsk);
	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread();

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	cgroup_exit(tsk);

	module_put(task_thread_info(tsk)->exec_domain->module);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
#ifdef CONFIG_NUMA
	task_lock(tsk);
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
	task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	validate_creds_for_do_exit(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));

	/*
	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
	 * when the following two conditions become true:
	 *   - there is a race condition on mmap_sem (it is acquired by
	 *     exit_mm()), and
	 *   - an SMI occurs before setting TASK_RUNNING (or the hypervisor
	 *     of a virtual machine switches to another guest).
	 * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
	 *
	 * To avoid this, we have to wait for the release of tsk->pi_lock,
	 * which is held by try_to_wake_up().
	 */
	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);
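
/*
 * The low 8 bits of the user-supplied status are shifted into bits
 * 8-15 of the exit code: that is the field wait(2)'s WEXITSTATUS()
 * extracts, while the low byte is reserved for the terminating-signal
 * information.
 */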
SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code & 0xff) << 8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

struct wait_opts {
	enum pid_type		wo_type;
	int			wo_flags;
	struct pid		*wo_pid;

	struct siginfo __user	*wo_info;
	int __user		*wo_stat;
	struct rusage __user	*wo_rusage;

	wait_queue_t		child_wait;
	int			notask_error;
};

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	return task->pids[type].pid;
}
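
/*
 * Return nonzero if @p's pid of the requested type matches the one we
 * are waiting for; wo_type == PIDTYPE_MAX means "wait for any child".
 */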
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;
	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
	    && !(wo->wo_flags & __WALL))
		return 0;

	return 1;
}
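
/*
 * Copy the wait results (siginfo and optional rusage) to user space
 * for a child that is not being reaped here; consumes the task
 * reference taken by the caller.
 */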
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
				pid_t pid, uid_t uid, int why, int status)
{
	struct siginfo __user *infop;
	int retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

	put_task_struct(p);
	infop = wo->wo_info;
	if (infop) {
		if (!retval)
			retval = put_user(SIGCHLD, &infop->si_signo);
		if (!retval)
			retval = put_user(0, &infop->si_errno);
		if (!retval)
			retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(pid, &infop->si_pid);
		if (!retval)
			retval = put_user(uid, &infop->si_uid);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	unsigned long state;
	int retval, status, traced;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct siginfo __user *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		int exit_code = p->exit_code;
		int why;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(wo, p, pid, uid, why, status);
	}

	traced = ptrace_reparented(p);
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = traced && thread_group_leader(p) ? EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * It can be ptraced but not reparented, check
	 * thread_group_leader() to filter out sub-threads.
	 */
	if (likely(!traced) && thread_group_leader(p)) {
		struct signal_struct *psig;
		struct signal_struct *sig;
		unsigned long maxrss;
		cputime_t tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		spin_lock_irq(&p->real_parent->sighand->siglock);
		psig = p->real_parent->signal;
		sig = p->signal;
		write_seqlock(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock(&psig->stats_lock);
		spin_unlock_irq(&p->real_parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to DEAD/TRACE.
	 */
	read_unlock(&tasklist_lock);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && wo->wo_stat)
		retval = put_user(status, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

	return retval;
}
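
/*
 * Return a pointer to the relevant exit-code field if @p is in a
 * reportable stopped state for this kind of wait, or NULL otherwise:
 * the per-task code for a ptrace wait, the group exit code for a
 * job-control stop.
 */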
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_stopped_or_traced(p) &&
		    !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct siginfo __user *infop;
	int retval, exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);

	if (unlikely(wo->wo_flags & WNOWAIT))
		return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	if (!retval && wo->wo_stat)
		retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!wo->wo_info) {
		retval = wo->wo_rusage
			? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
		put_task_struct(p);
		if (!retval && wo->wo_stat)
			retval = put_user(0xffff, wo->wo_stat);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(wo, p, pid, uid,
					     CLD_CONTINUED, SIGCONT);
		BUG_ON(retval == 0);
	}

	return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	int ret;

	if (unlikely(p->exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, p);
	if (!ret)
		return ret;

	ret = security_task_wait(p);
	if (unlikely(ret < 0)) {
		/*
		 * If we have not yet seen any eligible child,
		 * then let this error code replace -ECHILD.
		 * A permission error will give the user a clue
		 * to look for security policy problems, rather
		 * than for mysterious wait bugs.
		 */
		if (wo->notask_error)
			wo->notask_error = ret;
		return 0;
	}

	if (unlikely(p->exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, the debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (p->exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for.  If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in a finite
		 * amount of time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}
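
/*
 * Same as do_wait_thread(), but walks @tsk's ptrace-attached children
 * instead of its natural ones.
 */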
static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}
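
/*
 * Wakeup callback hooked into ->signal->wait_chldexit: only wake the
 * waiter when the exiting child is one it could actually be waiting
 * for, so unrelated child-exit events don't cause spurious wakeups.
 */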
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, 1, p);
}

static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options;
	wo.wo_info	= infop;
	wo.wo_stat	= NULL;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);

	if (ret > 0) {
		ret = 0;
	} else if (infop) {
		/*
		 * For a WNOHANG return, clear out all the fields
		 * we would set so the user can easily tell the
		 * difference.
		 */
		if (!ret)
			ret = put_user(0, &infop->si_signo);
		if (!ret)
			ret = put_user(0, &infop->si_errno);
		if (!ret)
			ret = put_user(0, &infop->si_code);
		if (!ret)
			ret = put_user(0, &infop->si_pid);
		if (!ret)
			ret = put_user(0, &infop->si_uid);
		if (!ret)
			ret = put_user(0, &infop->si_status);
	}

	put_pid(pid);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= stat_addr;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);

	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif