/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h>	/* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
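
/*
 * Drop @p from the pid hashes and, when the whole thread group is dead,
 * from the global process and sibling lists as well. Runs under
 * tasklist_lock and @p->sighand->siglock, via __exit_signal().
 */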
static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *uninitialized_var(tty);
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead) {
		posix_cpu_timers_exit_group(tsk);
	} else {
		/*
		 * This can only happen if the caller is de_thread().
		 * FIXME: this is a temporary hack, we should teach
		 * posix-cpu-timers to handle this case correctly.
		 */
		if (unlikely(has_group_leader_pid(tsk)))
			posix_cpu_timers_exit_group(tsk);
	}
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}
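
/*
 * Final cleanup of a dead task: drop the accounting references, flush
 * its /proc entries, detach it from the thread group, and free the
 * task_struct after an RCU grace period via delayed_put_task_struct().
 * May repeat to also reap a zombie group leader once its last live
 * thread has been released.
 */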
void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	proc_flush_task(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * Note that if this function returns a valid task_struct pointer (!NULL)
 * task->usage must remain >0 for the duration of the RCU critical section.
 */
struct task_struct *task_rcu_dereference(struct task_struct **ptask)
{
	struct sighand_struct *sighand;
	struct task_struct *task;

	/*
	 * We need to verify that release_task() was not called and thus
	 * delayed_put_task_struct() can't run and drop the last reference
	 * before rcu_read_unlock(). We check task->sighand != NULL,
	 * but we can read the already freed and reused memory.
	 */
retry:
	task = rcu_dereference(*ptask);
	if (!task)
		return NULL;

	probe_kernel_address(&task->sighand, sighand);

	/*
	 * Pairs with atomic_dec_and_test() in put_task_struct(). If this task
	 * was already freed we cannot miss the preceding update of this
	 * pointer.
	 */
	smp_rmb();
	if (unlikely(task != READ_ONCE(*ptask)))
		goto retry;

	/*
	 * We've re-checked that "task == *ptask", now we have two different
	 * cases:
	 *
	 * 1. This is actually the same task/task_struct. In this case
	 *    sighand != NULL tells us it is still alive.
	 *
	 * 2. This is another task which got the same memory for task_struct.
	 *    We can't know this of course, and we can not trust
	 *    sighand != NULL.
	 *
	 *    In this case we actually return a random value, but this is
	 *    correct.
	 *
	 *    If we return NULL - we can pretend that we actually noticed that
	 *    *ptask was updated when the previous task has exited. Or pretend
	 *    that probe_kernel_address(&sighand) reads NULL.
	 *
	 *    If we return the new task (because sighand is not NULL for any
	 *    reason) - this is fine too. This (new) task can't go away before
	 *    another gp pass.
	 *
	 *    And note: We could even eliminate the false positive if we
	 *    re-read task->sighand once again to avoid the falsely NULL.
	 *    But this case is very unlikely so we don't care.
	 */
	if (!sighand)
		return NULL;

	return task;
}
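
/*
 * Wake the task (if any) that registered itself in @w under RCU; the
 * read-side critical section keeps the woken task's task_struct from
 * being freed while we call wake_up_process().
 */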
void rcuwait_wake_up(struct rcuwait *w)
{
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake_up() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                  WAKE
	 *    [S] tsk = current     [S] cond = true
	 *        MB (A)                MB (B)
	 *    [L] cond              [L] tsk
	 */
	smp_rmb(); /* (B) */

	/*
	 * Avoid using task_rcu_dereference() magic as long as we are careful,
	 * see comment in rcuwait_wait_event() regarding ->exit_state.
	 */
	task = rcu_dereference(w->task);
	if (task)
		wake_up_process(task);
	rcu_read_unlock();
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}
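
/* Does the process group @pgrp contain at least one stopped job? */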
static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		mm->owner = NULL;
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;
	struct core_state *core_state;

	mm_release(current, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;

		up_read(&mm->mmap_sem);

		self.task = current;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			freezable_schedule();
		}
		__set_current_state(TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	mmgrab(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	current->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(current);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}
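
/*
 * Return the first thread in @p's thread group that has not yet set
 * PF_EXITING, or NULL if all of them are exiting.
 */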
static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);
	if (unlikely(pid_ns == &init_pid_ns)) {
		panic("Attempted to kill init! exitcode=0x%08x\n",
			father->signal->group_exit_code ?: father->exit_code);
	}
	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give them to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give them to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			t->real_parent = reaper;
			BUG_ON((!t->ptrace) != (t->parent == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
	if (tsk->exit_state == EXIT_DEAD)
		list_add(&tsk->ptrace_entry, &dead);

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
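
/*
 * The final path of task termination. Releases the task's resources
 * (mm, files, fs, namespaces, ...), reparents its children, notifies
 * the parent, and never returns. Reached from the exit/exit_group
 * syscalls, from do_group_exit() on fatal signals, and from kernel
 * threads via complete_and_exit().
 */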
void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;
	TASKS_RCU(int tasks_rcu_i);

	profile_task_exit(tsk);
	kcov_task_exit(tsk);

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	/*
	 * If do_exit is called because this process oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	set_fs(USER_DS);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */
	/*
	 * Ensure that all new tsk->pi_lock acquisitions must observe
	 * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
	 */
	smp_mb();
	/*
	 * Ensure that we must observe the pi_state in exit_mm() ->
	 * mm_release() -> exit_pi_state_list().
	 */
	raw_spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	TASKS_RCU(preempt_disable());
	TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
	TASKS_RCU(preempt_enable());
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	validate_creds_for_do_exit(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));

	do_task_dead();
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}
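
/*
 * Bookkeeping for a single wait*() call: what to wait for (wo_type,
 * wo_pid, wo_flags) and where to report the result (wo_info, wo_stat,
 * wo_rusage). notask_error holds the -ECHILD/0 state that do_wait()
 * maintains while scanning for eligible children.
 */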
struct waitid_info {
	pid_t pid;
	uid_t uid;
	int status;
	int cause;
};

struct wait_opts {
	enum pid_type		wo_type;
	int			wo_flags;
	struct pid		*wo_pid;

	struct waitid_info	*wo_info;
	int			wo_stat;
	struct rusage		*wo_rusage;

	wait_queue_entry_t	child_wait;
	int			notask_error;
};

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	return task->pids[type].pid;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct waitid_info *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		status = p->exit_code;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();
		if (wo->wo_rusage)
			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
		put_task_struct(p);
		goto out_info;
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		u64 tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time. Until
		 * we change k_getrusage()-like users to rely on this lock
		 * we have to take ->siglock as well.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		spin_lock_irq(&current->sighand->siglock);
		write_seqlock(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock(&psig->stats_lock);
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	wo->wo_stat = status;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

out_info:
	infop = wo->wo_info;
	if (infop) {
		if ((status & 0x7f) == 0) {
			infop->cause = CLD_EXITED;
			infop->status = status >> 8;
		} else {
			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			infop->status = status & 0x7f;
		}
		infop->pid = pid;
		infop->uid = uid;
	}

	return pid;
}
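
/*
 * Return a pointer to the relevant stop code for @p: the per-task
 * exit_code for a ptrace stop, the group_exit_code for a job-control
 * stop, or NULL if @p is not in the corresponding stopped state.
 */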
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for @p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = ACCESS_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for.  If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in a finite
		 * amount of time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}
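
/*
 * Wakeup callback hung on ->signal->wait_chldexit: wake the sleeping
 * waiter only if the child @key matches what the waiter asked for, so
 * unrelated child events don't cause spurious wakeups of do_wait().
 */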
static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, 1, p);
}

static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}
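
/*
 * Common backend of the native and compat waitid() syscalls: validate
 * @options, translate @which/@upid into a struct pid, and hand off to
 * do_wait().
 */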
static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options;
	wo.wo_info	= infop;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);

	put_pid(pid);
	return ret;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
	}

	if (!err) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	user_access_begin();
	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user((short)info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_access_end();
	return err;
Efault:
	user_access_end();
	return -EFAULT;
}
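
/*
 * Common backend of wait4(), waitpid() and their compat variants:
 * decode the classic pid encoding (-1, 0, <0, >0), always wait for
 * exited children (WEXITED), and copy the status word back to
 * userspace on success.
 */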
long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= 0;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
	}

	if (!err && uru) {
		/* kernel_waitid() overwrites everything in ru */
		if (COMPAT_USE_64BIT_TIME)
			err = copy_to_user(uru, &ru, sizeof(ru));
		else
			err = put_compat_rusage(&ru, uru);
		if (err)
			return -EFAULT;
	}

	if (!infop)
		return err;

	user_access_begin();
	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user((short)info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_access_end();
	return err;
Efault:
	user_access_end();
	return -EFAULT;
}
#endif