/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
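
/*
 * Drop a dying task from the global bookkeeping: detach its pids and,
 * if the whole thread group is dead, unlink it from the task and
 * sibling lists as well. Called with tasklist_lock write-locked, via
 * __exit_signal() below.
 */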
static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *uninitialized_var(tty);
	cputime_t utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (group_dead) {
		posix_cpu_timers_exit_group(tsk);
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * This can only happen if the caller is de_thread().
		 * FIXME: this is a temporary hack, we should teach
		 * posix-cpu-timers to handle this case correctly.
		 */
		if (unlikely(has_group_leader_pid(tsk)))
			posix_cpu_timers_exit_group(tsk);

		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}
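
/*
 * Final teardown of a dead task: drop its accounting, unhash it from
 * the pid structures under tasklist_lock, and schedule the task_struct
 * itself to be freed after an RCU grace period. If this was the last
 * non-leader thread and the group leader is an unwaited-for zombie,
 * the leader may be released here as well.
 */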
void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	proc_flush_task(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process (if it wants notification).
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * Note that if this function returns a valid task_struct pointer (!NULL)
 * task->usage must remain >0 for the duration of the RCU critical section.
 */
struct task_struct *task_rcu_dereference(struct task_struct **ptask)
{
	struct sighand_struct *sighand;
	struct task_struct *task;

	/*
	 * We need to verify that release_task() was not called and thus
	 * delayed_put_task_struct() can't run and drop the last reference
	 * before rcu_read_unlock(). We check task->sighand != NULL,
	 * but we can read the already freed and reused memory.
	 */
retry:
	task = rcu_dereference(*ptask);
	if (!task)
		return NULL;

	probe_kernel_address(&task->sighand, sighand);

	/*
	 * Pairs with atomic_dec_and_test() in put_task_struct(). If this task
	 * was already freed we can not miss the preceding update of this
	 * pointer.
	 */
	smp_rmb();
	if (unlikely(task != READ_ONCE(*ptask)))
		goto retry;

	/*
	 * We've re-checked that "task == *ptask", now we have two different
	 * cases:
	 *
	 * 1. This is actually the same task/task_struct. In this case
	 *    sighand != NULL tells us it is still alive.
	 *
	 * 2. This is another task which got the same memory for task_struct.
	 *    We can't know this of course, and we can not trust
	 *    sighand != NULL.
	 *
	 *    In this case we actually return a random value, but this is
	 *    correct.
	 *
	 *    If we return NULL - we can pretend that we actually noticed that
	 *    *ptask was updated when the previous task has exited. Or pretend
	 *    that probe_slab_address(&sighand) reads NULL.
	 *
	 *    If we return the new task (because sighand is not NULL for any
	 *    reason) - this is fine too. This (new) task can't go away before
	 *    another gp pass.
	 *
	 *    And note: we could even eliminate the false positive if we
	 *    re-read task->sighand once again to avoid the falsely NULL.
	 *    But this case is very unlikely so we don't care.
	 */
	if (!sighand)
		return NULL;

	return task;
}

struct task_struct *try_get_task_struct(struct task_struct **ptask)
{
	struct task_struct *task;

	rcu_read_lock();
	task = task_rcu_dereference(ptask);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52. Orphaned process groups are not to be affected
 * by terminal-generated stop signals. Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting. If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates. Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		mm->owner = NULL;
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()). Mark owner as NULL.
	 */
	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already.
 */
static void exit_mm(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm. The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;

		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			freezable_schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim(tsk);
}
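
/*
 * Return the first thread in @p's group that is not already exiting,
 * or NULL if every thread has PF_EXITING set.
 */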
static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}
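
/*
 * Pick the task that reaps children in this pid namespace. If the
 * dying task is itself the namespace's child_reaper, hand the role to
 * a live thread in its group; if none exists, the namespace is going
 * away (panic for the init namespace, zap_pid_ns_processes()
 * otherwise). May drop and reacquire tasklist_lock.
 */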
static struct task_struct *find_child_reaper(struct task_struct *father)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);
	if (unlikely(pid_ns == &init_pid_ns)) {
		panic("Attempted to kill init! exitcode=0x%08x\n",
			father->signal->group_exit_code ?: father->exit_code);
	}
	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give them to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give them to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We start from father to ensure we can not look into another
		 * namespace, this is safe because all its threads are dead.
		 */
		for (reaper = father;
		     !same_thread_group(reaper, child_reaper);
		     reaper = reaper->real_parent) {
			/* call_usermodehelper() descendants need this check */
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A. Make init inherit all the child processes
 * B. Check to see if any process groups have become orphaned
 *    as a result of our exiting, and if they have any stopped
 *    jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			t->real_parent = reaper;
			BUG_ON((!t->ptrace) != (t->parent == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us.
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
	if (tsk->exit_state == EXIT_DEAD)
		list_add(&tsk->ptrace_entry, &dead);

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
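
/*
 * The final path for a dying task: flush accounting, release the mm,
 * files, fs, namespaces and other per-task state, notify the parent
 * via exit_notify(), and finally switch to TASK_DEAD and schedule()
 * away for good. The exit code is stashed in tsk->exit_code for the
 * parent to collect via wait().
 */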
void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;
	TASKS_RCU(int tasks_rcu_i);

	profile_task_exit(tsk);
	kcov_task_exit(tsk);

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	/*
	 * If do_exit is called because this process oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	set_fs(USER_DS);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);	/* sets PF_EXITING */
	/*
	 * Ensure that all new tsk->pi_lock acquisitions must observe
	 * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
	 */
	smp_mb();
	/*
	 * Ensure that we must observe the pi_state in exit_mm() ->
	 * mm_release() -> exit_pi_state_list().
	 */
	raw_spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * Because of cgroup mode, this must be called before cgroup_exit().
	 */
	perf_event_exit_task(tsk);

	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	TASKS_RCU(preempt_disable());
	TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
	TASKS_RCU(preempt_enable());
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	validate_creds_for_do_exit(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));

	/*
	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
	 * when the following two conditions become true:
	 *   - there is a race condition on mmap_sem (it is acquired by
	 *     exit_mm()), and
	 *   - an SMI occurs before setting TASK_RUNNING (or the hypervisor
	 *     of a virtual machine switches to another guest).
	 * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
	 *
	 * To avoid it, we have to wait for releasing tsk->pi_lock which
	 * is held by try_to_wake_up().
	 */
	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
	schedule();
	BUG();
	/* Avoid "noreturn function does return". */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);
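
/*
 * The low byte of the user-supplied status is shifted into bits 8-15
 * of tsk->exit_code; the low bits are reserved for the terminating
 * signal number and the core-dump flag that wait() reports (see the
 * decoding in wait_task_zombie()).
 */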
SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code & 0xff) << 8);
}

/*
 * Take down every thread in the group. This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock. */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}
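
/*
 * Options and results for one do_wait() invocation, shared by the
 * wait4(), waitid() and waitpid() entry points.
 */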
struct wait_opts {
	enum pid_type		wo_type;
	int			wo_flags;
	struct pid		*wo_pid;

	struct siginfo __user	*wo_info;
	int __user		*wo_stat;
	struct rusage __user	*wo_rusage;

	wait_queue_t		child_wait;
	int			notask_error;
};

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	return task->pids[type].pid;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}

static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
				pid_t pid, uid_t uid, int why, int status)
{
	struct siginfo __user *infop;
	int retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

	put_task_struct(p);
	infop = wo->wo_info;
	if (infop) {
		if (!retval)
			retval = put_user(SIGCHLD, &infop->si_signo);
		if (!retval)
			retval = put_user(0, &infop->si_errno);
		if (!retval)
			retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(pid, &infop->si_pid);
		if (!retval)
			retval = put_user(uid, &infop->si_uid);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4() work for one task in state EXIT_ZOMBIE. We hold
 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
 * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, retval, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct siginfo __user *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		int exit_code = p->exit_code;
		int why;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();

		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(wo, p, pid, uid, why, status);
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		cputime_t tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct. Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped. All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time. Until
		 * we change k_getrusage()-like users to rely on this lock
		 * we have to take ->siglock as well.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		spin_lock_irq(&current->sighand->siglock);
		write_seqlock(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock(&psig->stats_lock);
		spin_unlock_irq(&current->sighand->siglock);
	}

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && wo->wo_stat)
		retval = put_user(status, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

	return retval;
}
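
/*
 * Return a pointer to the stopped-state exit code we should report:
 * the per-task exit_code for a ptrace stop, the group exit code for a
 * job-control stop, or NULL if the task is not in a reportable stop.
 */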
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for @p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero. Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue. Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct siginfo __user *infop;
	int retval, exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below. We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	if (unlikely(wo->wo_flags & WNOWAIT))
		return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	if (!retval && wo->wo_stat)
		retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
 * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held. */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	if (!wo->wo_info) {
		retval = wo->wo_rusage
			? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
		put_task_struct(p);
		if (!retval && wo->wo_stat)
			retval = put_user(0xffff, wo->wo_stat);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(wo, p, pid, uid,
					     CLD_CONTINUED, SIGCONT);
		BUG_ON(retval == 0);
	}

	return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = ACCESS_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	ret = security_task_wait(p);
	if (unlikely(ret < 0)) {
		/*
		 * If we have not yet seen any eligible child,
		 * then let this error code replace -ECHILD.
		 * A permission error will give the user a clue
		 * to look for security policy problems, rather
		 * than for mysterious wait bugs.
		 */
		if (wo->notask_error)
			wo->notask_error = ret;
		return 0;
	}

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through. Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared. If not, a subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for. If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in a finite
		 * amount of time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies. Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped. Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued. There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent. Don't
	 * use WCONTINUED from ptracer. You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}
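
/*
 * Wake-up callback attached to ->wait_chldexit: only wake the waiter
 * if the exiting child matches its pid filter (and, with __WNOTHREAD,
 * only if the waiter is the actual parent).
 */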
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, 1, p);
}
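
/*
 * Core wait loop: scan the children (and ptracees) of every thread in
 * the caller's group for a task matching @wo, sleeping on
 * ->wait_chldexit between scans unless WNOHANG was given.
 */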
static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}
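
/*
 * waitid(2): wait for a child selected by @which/@upid and report the
 * result through a siginfo structure rather than a packed status word.
 */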
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options;
	wo.wo_info	= infop;
	wo.wo_stat	= NULL;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);

	if (ret > 0) {
		ret = 0;
	} else if (infop) {
		/*
		 * For a WNOHANG return, clear out all the fields
		 * we would set so the user can easily tell the
		 * difference.
		 */
		if (!ret)
			ret = put_user(0, &infop->si_signo);
		if (!ret)
			ret = put_user(0, &infop->si_errno);
		if (!ret)
			ret = put_user(0, &infop->si_code);
		if (!ret)
			ret = put_user(0, &infop->si_pid);
		if (!ret)
			ret = put_user(0, &infop->si_uid);
		if (!ret)
			ret = put_user(0, &infop->si_status);
	}

	put_pid(pid);
	return ret;
}
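
/*
 * wait4(2): the sign of @upid selects the scope, following the classic
 * waitpid() convention: -1 waits for any child, < -1 for the process
 * group given by -upid, 0 for the caller's own process group, and > 0
 * for the single process with that pid.
 */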
SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= stat_addr;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);

	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif