sched.h

  1. #ifndef _LINUX_SCHED_H
  2. #define _LINUX_SCHED_H
  3. #include <uapi/linux/sched.h>
  4. #include <linux/sched/prio.h>
  5. #include <asm/param.h> /* for HZ */
  6. #include <linux/capability.h>
  7. #include <linux/threads.h>
  8. #include <linux/kernel.h>
  9. #include <linux/types.h>
  10. #include <linux/timex.h>
  11. #include <linux/jiffies.h>
  12. #include <linux/mutex.h>
  13. #include <linux/plist.h>
  14. #include <linux/rbtree.h>
  15. #include <linux/thread_info.h>
  16. #include <linux/cpumask.h>
  17. #include <linux/errno.h>
  18. #include <linux/nodemask.h>
  19. #include <linux/mm_types.h>
  20. #include <linux/preempt.h>
  21. #include <asm/page.h>
  22. #include <asm/ptrace.h>
  23. #include <linux/smp.h>
  24. #include <linux/sem.h>
  25. #include <linux/shm.h>
  26. #include <linux/signal.h>
  27. #include <linux/compiler.h>
  28. #include <linux/completion.h>
  29. #include <linux/signal_types.h>
  30. #include <linux/pid.h>
  31. #include <linux/percpu.h>
  32. #include <linux/topology.h>
  33. #include <linux/seccomp.h>
  34. #include <linux/rcupdate.h>
  35. #include <linux/rculist.h>
  36. #include <linux/rtmutex.h>
  37. #include <linux/time.h>
  38. #include <linux/param.h>
  39. #include <linux/resource.h>
  40. #include <linux/timer.h>
  41. #include <linux/hrtimer.h>
  42. #include <linux/kcov.h>
  43. #include <linux/task_io_accounting.h>
  44. #include <linux/latencytop.h>
  45. #include <linux/cred.h>
  46. #include <linux/llist.h>
  47. #include <linux/uidgid.h>
  48. #include <linux/gfp.h>
  49. #include <linux/topology.h>
  50. #include <linux/magic.h>
  51. #include <linux/cgroup-defs.h>
  52. #include <asm/processor.h>
  53. struct sched_attr;
  54. struct sched_param;
  55. struct futex_pi_state;
  56. struct robust_list_head;
  57. struct bio_list;
  58. struct fs_struct;
  59. struct perf_event_context;
  60. struct blk_plug;
  61. struct filename;
  62. struct nameidata;
  63. struct signal_struct;
  64. struct sighand_struct;
  65. extern unsigned long total_forks;
  66. extern int nr_threads;
  67. DECLARE_PER_CPU(unsigned long, process_counts);
  68. extern int nr_processes(void);
  69. extern unsigned long nr_running(void);
  70. extern bool single_task_running(void);
  71. extern unsigned long nr_iowait(void);
  72. extern unsigned long nr_iowait_cpu(int cpu);
  73. extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
  74. #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
  75. extern void cpu_load_update_nohz_start(void);
  76. extern void cpu_load_update_nohz_stop(void);
  77. #else
  78. static inline void cpu_load_update_nohz_start(void) { }
  79. static inline void cpu_load_update_nohz_stop(void) { }
  80. #endif
  81. extern void dump_cpu_task(int cpu);
  82. struct seq_file;
  83. struct cfs_rq;
  84. struct task_group;
  85. #ifdef CONFIG_SCHED_DEBUG
  86. extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
  87. extern void proc_sched_set_task(struct task_struct *p);
  88. #endif
  89. /*
  90. * Task state bitmask. NOTE! These bits are also
  91. * encoded in fs/proc/array.c: get_task_state().
  92. *
  93. * We have two separate sets of flags: task->state
  94. * is about runnability, while task->exit_state is
  95. * about the task exiting. Confusing, but this way
  96. * modifying one set can't modify the other one by
  97. * mistake.
  98. */
  99. #define TASK_RUNNING 0
  100. #define TASK_INTERRUPTIBLE 1
  101. #define TASK_UNINTERRUPTIBLE 2
  102. #define __TASK_STOPPED 4
  103. #define __TASK_TRACED 8
  104. /* in tsk->exit_state */
  105. #define EXIT_DEAD 16
  106. #define EXIT_ZOMBIE 32
  107. #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
  108. /* in tsk->state again */
  109. #define TASK_DEAD 64
  110. #define TASK_WAKEKILL 128
  111. #define TASK_WAKING 256
  112. #define TASK_PARKED 512
  113. #define TASK_NOLOAD 1024
  114. #define TASK_NEW 2048
  115. #define TASK_STATE_MAX 4096
  116. #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
  117. /* Convenience macros for the sake of set_current_state */
  118. #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
  119. #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
  120. #define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
  121. #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
  122. /* Convenience macros for the sake of wake_up */
  123. #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
  124. #define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
  125. /* get_task_state() */
  126. #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
  127. TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
  128. __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
  129. #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
  130. #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
  131. #define task_is_stopped_or_traced(task) \
  132. ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
  133. #define task_contributes_to_load(task) \
  134. ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
  135. (task->flags & PF_FROZEN) == 0 && \
  136. (task->state & TASK_NOLOAD) == 0)
  137. #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  138. #define __set_current_state(state_value) \
  139. do { \
  140. current->task_state_change = _THIS_IP_; \
  141. current->state = (state_value); \
  142. } while (0)
  143. #define set_current_state(state_value) \
  144. do { \
  145. current->task_state_change = _THIS_IP_; \
  146. smp_store_mb(current->state, (state_value)); \
  147. } while (0)
  148. #else
  149. /*
  150. * set_current_state() includes a barrier so that the write of current->state
  151. * is correctly serialised wrt the caller's subsequent test of whether to
  152. * actually sleep:
  153. *
  154. * for (;;) {
  155. * set_current_state(TASK_UNINTERRUPTIBLE);
  156. * if (!need_sleep)
  157. * break;
  158. *
  159. * schedule();
  160. * }
  161. * __set_current_state(TASK_RUNNING);
  162. *
  163. * If the caller does not need such serialisation (because, for instance, the
  164. * condition test and condition change and wakeup are under the same lock) then
  165. * use __set_current_state().
  166. *
  167. * The above is typically ordered against the wakeup, which does:
  168. *
  169. * need_sleep = false;
  170. * wake_up_state(p, TASK_UNINTERRUPTIBLE);
  171. *
  172. * Where wake_up_state() (and all other wakeup primitives) imply enough
  173. * barriers to order the store of the variable against wakeup.
  174. *
  175. * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
  176. * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
  177. * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
  178. *
  179. * This is obviously fine, since they both store the exact same value.
  180. *
  181. * Also see the comments of try_to_wake_up().
  182. */
  183. #define __set_current_state(state_value) \
  184. do { current->state = (state_value); } while (0)
  185. #define set_current_state(state_value) \
  186. smp_store_mb(current->state, (state_value))
  187. #endif
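
The comment above already sketches the canonical sleep/wake pattern; to make it concrete, here is a small illustrative fragment (an editor's addition, not part of the header). `need_sleep` stands in for whatever condition the caller tests and `sleeper` for the task being woken; both names are assumptions for the sake of the example.

/* Sleeper: publish the state, then test the condition, then block. */
for (;;) {
        set_current_state(TASK_UNINTERRUPTIBLE);        /* store + full barrier */
        if (!need_sleep)
                break;
        schedule();
}
__set_current_state(TASK_RUNNING);

/* Waker: change the condition first, then issue the wakeup. */
need_sleep = false;
wake_up_process(sleeper);       /* wakeup primitives imply the needed barriers */
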
  188. /* Task command name length */
  189. #define TASK_COMM_LEN 16
  190. #include <linux/spinlock.h>
  191. /*
  192. * This serializes "schedule()" and also protects
  193. * the run-queue from deletions/modifications (but
  194. * _adding_ to the beginning of the run-queue has
  195. * a separate lock).
  196. */
  197. extern rwlock_t tasklist_lock;
  198. extern spinlock_t mmlist_lock;
  199. struct task_struct;
  200. #ifdef CONFIG_PROVE_RCU
  201. extern int lockdep_tasklist_lock_is_held(void);
  202. #endif /* #ifdef CONFIG_PROVE_RCU */
  203. extern void sched_init(void);
  204. extern void sched_init_smp(void);
  205. extern asmlinkage void schedule_tail(struct task_struct *prev);
  206. extern void init_idle(struct task_struct *idle, int cpu);
  207. extern void init_idle_bootup_task(struct task_struct *idle);
  208. extern cpumask_var_t cpu_isolated_map;
  209. extern int runqueue_is_locked(int cpu);
  210. #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
  211. extern void nohz_balance_enter_idle(int cpu);
  212. extern void set_cpu_sd_state_idle(void);
  213. extern int get_nohz_timer_target(void);
  214. #else
  215. static inline void nohz_balance_enter_idle(int cpu) { }
  216. static inline void set_cpu_sd_state_idle(void) { }
  217. #endif
  218. /*
  219. * Only dump TASK_* tasks. (0 for all tasks)
  220. */
  221. extern void show_state_filter(unsigned long state_filter);
  222. static inline void show_state(void)
  223. {
  224. show_state_filter(0);
  225. }
  226. extern void show_regs(struct pt_regs *);
  227. /*
  228. * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
  229. * task), SP is the stack pointer of the first frame that should be shown in the back
  230. * trace (or NULL if the entire call-chain of the task should be shown).
  231. */
  232. extern void show_stack(struct task_struct *task, unsigned long *sp);
  233. extern void cpu_init (void);
  234. extern void trap_init(void);
  235. extern void update_process_times(int user);
  236. extern void scheduler_tick(void);
  237. extern int sched_cpu_starting(unsigned int cpu);
  238. extern int sched_cpu_activate(unsigned int cpu);
  239. extern int sched_cpu_deactivate(unsigned int cpu);
  240. #ifdef CONFIG_HOTPLUG_CPU
  241. extern int sched_cpu_dying(unsigned int cpu);
  242. #else
  243. # define sched_cpu_dying NULL
  244. #endif
  245. extern void sched_show_task(struct task_struct *p);
  246. #ifdef CONFIG_LOCKUP_DETECTOR
  247. extern void touch_softlockup_watchdog_sched(void);
  248. extern void touch_softlockup_watchdog(void);
  249. extern void touch_softlockup_watchdog_sync(void);
  250. extern void touch_all_softlockup_watchdogs(void);
  251. extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
  252. void __user *buffer,
  253. size_t *lenp, loff_t *ppos);
  254. extern unsigned int softlockup_panic;
  255. extern unsigned int hardlockup_panic;
  256. void lockup_detector_init(void);
  257. #else
  258. static inline void touch_softlockup_watchdog_sched(void)
  259. {
  260. }
  261. static inline void touch_softlockup_watchdog(void)
  262. {
  263. }
  264. static inline void touch_softlockup_watchdog_sync(void)
  265. {
  266. }
  267. static inline void touch_all_softlockup_watchdogs(void)
  268. {
  269. }
  270. static inline void lockup_detector_init(void)
  271. {
  272. }
  273. #endif
  274. #ifdef CONFIG_DETECT_HUNG_TASK
  275. void reset_hung_task_detector(void);
  276. #else
  277. static inline void reset_hung_task_detector(void)
  278. {
  279. }
  280. #endif
  281. /* Attach to any functions which should be ignored in wchan output. */
  282. #define __sched __attribute__((__section__(".sched.text")))
  283. /* Linker adds these: start and end of __sched functions */
  284. extern char __sched_text_start[], __sched_text_end[];
  285. /* Is this address in the __sched functions? */
  286. extern int in_sched_functions(unsigned long addr);
  287. #define MAX_SCHEDULE_TIMEOUT LONG_MAX
  288. extern signed long schedule_timeout(signed long timeout);
  289. extern signed long schedule_timeout_interruptible(signed long timeout);
  290. extern signed long schedule_timeout_killable(signed long timeout);
  291. extern signed long schedule_timeout_uninterruptible(signed long timeout);
  292. extern signed long schedule_timeout_idle(signed long timeout);
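
A usage note added for clarity (not taken from the header): schedule_timeout() expects the caller to have set the task state beforehand, while the _interruptible/_killable/_uninterruptible/_idle variants set it themselves. A minimal sketch of a roughly one-second delay:

/* Explicit form: set the state, then hand over the timeout in jiffies. */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ);

/* Convenience form doing the same thing: */
schedule_timeout_uninterruptible(HZ);
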
  293. asmlinkage void schedule(void);
  294. extern void schedule_preempt_disabled(void);
  295. extern int __must_check io_schedule_prepare(void);
  296. extern void io_schedule_finish(int token);
  297. extern long io_schedule_timeout(long timeout);
  298. extern void io_schedule(void);
  299. void __noreturn do_task_dead(void);
  300. struct nsproxy;
  301. struct user_namespace;
  302. #ifdef CONFIG_MMU
  303. extern void arch_pick_mmap_layout(struct mm_struct *mm);
  304. extern unsigned long
  305. arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
  306. unsigned long, unsigned long);
  307. extern unsigned long
  308. arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
  309. unsigned long len, unsigned long pgoff,
  310. unsigned long flags);
  311. #else
  312. static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
  313. #endif
  314. struct pacct_struct {
  315. int ac_flag;
  316. long ac_exitcode;
  317. unsigned long ac_mem;
  318. u64 ac_utime, ac_stime;
  319. unsigned long ac_minflt, ac_majflt;
  320. };
  321. struct cpu_itimer {
  322. u64 expires;
  323. u64 incr;
  324. };
  325. /**
  326. * struct prev_cputime - snapshot of system and user cputime
  327. * @utime: time spent in user mode
  328. * @stime: time spent in system mode
  329. * @lock: protects the above two fields
  330. *
  331. * Stores previous user/system time values such that we can guarantee
  332. * monotonicity.
  333. */
  334. struct prev_cputime {
  335. #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
  336. u64 utime;
  337. u64 stime;
  338. raw_spinlock_t lock;
  339. #endif
  340. };
  341. static inline void prev_cputime_init(struct prev_cputime *prev)
  342. {
  343. #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
  344. prev->utime = prev->stime = 0;
  345. raw_spin_lock_init(&prev->lock);
  346. #endif
  347. }
  348. /**
  349. * struct task_cputime - collected CPU time counts
  350. * @utime: time spent in user mode, in nanoseconds
  351. * @stime: time spent in kernel mode, in nanoseconds
  352. * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
  353. *
  354. * This structure groups together three kinds of CPU time that are tracked for
  355. * threads and thread groups. Most things considering CPU time want to group
  356. * these counts together and treat all three of them in parallel.
  357. */
  358. struct task_cputime {
  359. u64 utime;
  360. u64 stime;
  361. unsigned long long sum_exec_runtime;
  362. };
  363. /* Alternate field names when used to cache expirations. */
  364. #define virt_exp utime
  365. #define prof_exp stime
  366. #define sched_exp sum_exec_runtime
  367. /*
  368. * This is the atomic variant of task_cputime, which can be used for
  369. * storing and updating task_cputime statistics without locking.
  370. */
  371. struct task_cputime_atomic {
  372. atomic64_t utime;
  373. atomic64_t stime;
  374. atomic64_t sum_exec_runtime;
  375. };
  376. #define INIT_CPUTIME_ATOMIC \
  377. (struct task_cputime_atomic) { \
  378. .utime = ATOMIC64_INIT(0), \
  379. .stime = ATOMIC64_INIT(0), \
  380. .sum_exec_runtime = ATOMIC64_INIT(0), \
  381. }
  382. #define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
  383. /*
  384. * Disable preemption until the scheduler is running -- use an unconditional
  385. * value so that it also works on !PREEMPT_COUNT kernels.
  386. *
  387. * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
  388. */
  389. #define INIT_PREEMPT_COUNT PREEMPT_OFFSET
  390. /*
  391. * Initial preempt_count value; reflects the preempt_count schedule invariant
  392. * which states that during context switches:
  393. *
  394. * preempt_count() == 2*PREEMPT_DISABLE_OFFSET
  395. *
  396. * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
  397. * Note: See finish_task_switch().
  398. */
  399. #define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
  400. /**
  401. * struct thread_group_cputimer - thread group interval timer counts
  402. * @cputime_atomic: atomic thread group interval timers.
  403. * @running: true when there are timers running and
  404. * @cputime_atomic receives updates.
  405. * @checking_timer: true when a thread in the group is in the
  406. * process of checking for thread group timers.
  407. *
  408. * This structure contains the version of task_cputime, above, that is
  409. * used for thread group CPU timer calculations.
  410. */
  411. struct thread_group_cputimer {
  412. struct task_cputime_atomic cputime_atomic;
  413. bool running;
  414. bool checking_timer;
  415. };
  416. #include <linux/rwsem.h>
  417. struct autogroup;
  418. /*
  419. * Some day this will be a full-fledged user tracking system..
  420. */
  421. struct user_struct {
  422. atomic_t __count; /* reference count */
  423. atomic_t processes; /* How many processes does this user have? */
  424. atomic_t sigpending; /* How many pending signals does this user have? */
  425. #ifdef CONFIG_FANOTIFY
  426. atomic_t fanotify_listeners;
  427. #endif
  428. #ifdef CONFIG_EPOLL
  429. atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
  430. #endif
  431. #ifdef CONFIG_POSIX_MQUEUE
  432. /* protected by mq_lock */
  433. unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
  434. #endif
  435. unsigned long locked_shm; /* How many pages of mlocked shm ? */
  436. unsigned long unix_inflight; /* How many files in flight in unix sockets */
  437. atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
  438. #ifdef CONFIG_KEYS
  439. struct key *uid_keyring; /* UID specific keyring */
  440. struct key *session_keyring; /* UID's default session keyring */
  441. #endif
  442. /* Hash table maintenance information */
  443. struct hlist_node uidhash_node;
  444. kuid_t uid;
  445. #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
  446. atomic_long_t locked_vm;
  447. #endif
  448. };
  449. extern int uids_sysfs_init(void);
  450. extern struct user_struct *find_user(kuid_t);
  451. extern struct user_struct root_user;
  452. #define INIT_USER (&root_user)
  453. struct backing_dev_info;
  454. struct reclaim_state;
  455. #ifdef CONFIG_SCHED_INFO
  456. struct sched_info {
  457. /* cumulative counters */
  458. unsigned long pcount; /* # of times run on this cpu */
  459. unsigned long long run_delay; /* time spent waiting on a runqueue */
  460. /* timestamps */
  461. unsigned long long last_arrival,/* when we last ran on a cpu */
  462. last_queued; /* when we were last queued to run */
  463. };
  464. #endif /* CONFIG_SCHED_INFO */
  465. struct task_delay_info;
  466. static inline int sched_info_on(void)
  467. {
  468. #ifdef CONFIG_SCHEDSTATS
  469. return 1;
  470. #elif defined(CONFIG_TASK_DELAY_ACCT)
  471. extern int delayacct_on;
  472. return delayacct_on;
  473. #else
  474. return 0;
  475. #endif
  476. }
  477. #ifdef CONFIG_SCHEDSTATS
  478. void force_schedstat_enabled(void);
  479. #endif
  480. /*
  481. * Integer metrics need fixed point arithmetic, e.g., sched/fair
  482. * has a few: load, load_avg, util_avg, freq, and capacity.
  483. *
  484. * We define a basic fixed point arithmetic range, and then formalize
  485. * all these metrics based on that basic range.
  486. */
  487. # define SCHED_FIXEDPOINT_SHIFT 10
  488. # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
  489. struct io_context; /* See blkdev.h */
  490. #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
  491. extern void prefetch_stack(struct task_struct *t);
  492. #else
  493. static inline void prefetch_stack(struct task_struct *t) { }
  494. #endif
  495. struct audit_context; /* See audit.c */
  496. struct mempolicy;
  497. struct pipe_inode_info;
  498. struct uts_namespace;
  499. struct load_weight {
  500. unsigned long weight;
  501. u32 inv_weight;
  502. };
  503. /*
  504. * The load_avg/util_avg accumulates an infinite geometric series
  505. * (see __update_load_avg() in kernel/sched/fair.c).
  506. *
  507. * [load_avg definition]
  508. *
  509. * load_avg = runnable% * scale_load_down(load)
  510. *
  511. * where runnable% is the time ratio that a sched_entity is runnable.
  512. * For cfs_rq, it is the aggregated load_avg of all runnable and
  513. * blocked sched_entities.
  514. *
  515. * load_avg may also take frequency scaling into account:
  516. *
  517. * load_avg = runnable% * scale_load_down(load) * freq%
  518. *
  519. * where freq% is the CPU frequency normalized to the highest frequency.
  520. *
  521. * [util_avg definition]
  522. *
  523. * util_avg = running% * SCHED_CAPACITY_SCALE
  524. *
  525. * where running% is the time ratio that a sched_entity is running on
  526. * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
  527. * and blocked sched_entities.
  528. *
  529. * util_avg may also factor frequency scaling and CPU capacity scaling:
  530. *
  531. * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
  532. *
  533. * where freq% is the same as above, and capacity% is the CPU capacity
  534. * normalized to the greatest capacity (due to uarch differences, etc).
  535. *
  536. * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
  537. * themselves are in the range of [0, 1]. To do fixed point arithmetic,
  538. * we therefore scale them to as large a range as necessary. This is for
  539. * example reflected by util_avg's SCHED_CAPACITY_SCALE.
  540. *
  541. * [Overflow issue]
  542. *
  543. * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
  544. * with the highest load (=88761), always runnable on a single cfs_rq,
  545. * and should not overflow as the number already hits PID_MAX_LIMIT.
  546. *
  547. * For all other cases (including 32-bit kernels), struct load_weight's
  548. * weight will overflow first before we do, because:
  549. *
  550. * Max(load_avg) <= Max(load.weight)
  551. *
  552. * Then it is the load_weight's responsibility to consider overflow
  553. * issues.
  554. */
  555. struct sched_avg {
  556. u64 last_update_time, load_sum;
  557. u32 util_sum, period_contrib;
  558. unsigned long load_avg, util_avg;
  559. };
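
A worked example of the fixed-point ranges described above (added for illustration; the percentages and the nice-0 weight of 1024 are assumed values, not anything defined on these lines). With SCHED_CAPACITY_SCALE = 1024, an entity running 25% of the time settles around util_avg ~= 256, and one that is runnable 50% of the time at nice-0 weight settles around load_avg ~= 512:

/* Hypothetical steady-state snapshot, for illustration only. */
unsigned long running_pct  = 25;    /* running 25% of the time             */
unsigned long runnable_pct = 50;    /* runnable 50% of the time            */
unsigned long weight       = 1024;  /* nice-0 load after scale_load_down() */

unsigned long util_avg = running_pct  * 1024 / 100;    /* ~256, 1024 = SCHED_CAPACITY_SCALE */
unsigned long load_avg = runnable_pct * weight / 100;  /* ~512                              */
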
  560. #ifdef CONFIG_SCHEDSTATS
  561. struct sched_statistics {
  562. u64 wait_start;
  563. u64 wait_max;
  564. u64 wait_count;
  565. u64 wait_sum;
  566. u64 iowait_count;
  567. u64 iowait_sum;
  568. u64 sleep_start;
  569. u64 sleep_max;
  570. s64 sum_sleep_runtime;
  571. u64 block_start;
  572. u64 block_max;
  573. u64 exec_max;
  574. u64 slice_max;
  575. u64 nr_migrations_cold;
  576. u64 nr_failed_migrations_affine;
  577. u64 nr_failed_migrations_running;
  578. u64 nr_failed_migrations_hot;
  579. u64 nr_forced_migrations;
  580. u64 nr_wakeups;
  581. u64 nr_wakeups_sync;
  582. u64 nr_wakeups_migrate;
  583. u64 nr_wakeups_local;
  584. u64 nr_wakeups_remote;
  585. u64 nr_wakeups_affine;
  586. u64 nr_wakeups_affine_attempts;
  587. u64 nr_wakeups_passive;
  588. u64 nr_wakeups_idle;
  589. };
  590. #endif
  591. struct sched_entity {
  592. struct load_weight load; /* for load-balancing */
  593. struct rb_node run_node;
  594. struct list_head group_node;
  595. unsigned int on_rq;
  596. u64 exec_start;
  597. u64 sum_exec_runtime;
  598. u64 vruntime;
  599. u64 prev_sum_exec_runtime;
  600. u64 nr_migrations;
  601. #ifdef CONFIG_SCHEDSTATS
  602. struct sched_statistics statistics;
  603. #endif
  604. #ifdef CONFIG_FAIR_GROUP_SCHED
  605. int depth;
  606. struct sched_entity *parent;
  607. /* rq on which this entity is (to be) queued: */
  608. struct cfs_rq *cfs_rq;
  609. /* rq "owned" by this entity/group: */
  610. struct cfs_rq *my_q;
  611. #endif
  612. #ifdef CONFIG_SMP
  613. /*
  614. * Per entity load average tracking.
  615. *
  616. * Put into separate cache line so it does not
  617. * collide with read-mostly values above.
  618. */
  619. struct sched_avg avg ____cacheline_aligned_in_smp;
  620. #endif
  621. };
  622. struct sched_rt_entity {
  623. struct list_head run_list;
  624. unsigned long timeout;
  625. unsigned long watchdog_stamp;
  626. unsigned int time_slice;
  627. unsigned short on_rq;
  628. unsigned short on_list;
  629. struct sched_rt_entity *back;
  630. #ifdef CONFIG_RT_GROUP_SCHED
  631. struct sched_rt_entity *parent;
  632. /* rq on which this entity is (to be) queued: */
  633. struct rt_rq *rt_rq;
  634. /* rq "owned" by this entity/group: */
  635. struct rt_rq *my_q;
  636. #endif
  637. };
  638. struct sched_dl_entity {
  639. struct rb_node rb_node;
  640. /*
  641. * Original scheduling parameters. Copied here from sched_attr
  642. * during sched_setattr(), they will remain the same until
  643. * the next sched_setattr().
  644. */
  645. u64 dl_runtime; /* maximum runtime for each instance */
  646. u64 dl_deadline; /* relative deadline of each instance */
  647. u64 dl_period; /* separation of two instances (period) */
  648. u64 dl_bw; /* dl_runtime / dl_deadline */
  649. /*
  650. * Actual scheduling parameters. Initialized with the values above,
  651. * they are continuously updated during task execution. Note that
  652. * the remaining runtime could be < 0 in case we are in overrun.
  653. */
  654. s64 runtime; /* remaining runtime for this instance */
  655. u64 deadline; /* absolute deadline for this instance */
  656. unsigned int flags; /* specifying the scheduler behaviour */
  657. /*
  658. * Some bool flags:
  659. *
  660. * @dl_throttled tells if we exhausted the runtime. If so, the
  661. * task has to wait for a replenishment to be performed at the
  662. * next firing of dl_timer.
  663. *
  664. * @dl_boosted tells if we are boosted due to DI. If so we are
  665. * outside bandwidth enforcement mechanism (but only until we
  666. * exit the critical section);
  667. *
  668. * @dl_yielded tells if task gave up the cpu before consuming
  669. * all its available runtime during the last job.
  670. */
  671. int dl_throttled, dl_boosted, dl_yielded;
  672. /*
  673. * Bandwidth enforcement timer. Each -deadline task has its
  674. * own bandwidth to be enforced, thus we need one timer per task.
  675. */
  676. struct hrtimer dl_timer;
  677. };
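
For orientation, the three original parameters above correspond one-to-one to the sched_runtime/sched_deadline/sched_period fields that userspace passes to the sched_setattr() system call. The fragment below is an illustrative userspace sketch (an editor's addition, not part of this header); it assumes the libc headers expose SYS_sched_setattr but provide no wrapper, so the struct is declared locally as in the kernel's SCHED_DEADLINE documentation, and the 10 ms / 30 ms / 30 ms figures are arbitrary example values in nanoseconds.

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/types.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

/* Local mirror of the kernel's sched_attr; glibc provides no wrapper. */
struct sched_attr {
        __u32 size;
        __u32 sched_policy;
        __u64 sched_flags;
        __s32 sched_nice;
        __u32 sched_priority;
        /* SCHED_DEADLINE parameters, all in nanoseconds */
        __u64 sched_runtime;
        __u64 sched_deadline;
        __u64 sched_period;
};

static int become_deadline_task(void)
{
        struct sched_attr attr = {
                .size           = sizeof(attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_runtime  = 10 * 1000 * 1000,   /* may run 10 ms ...     */
                .sched_deadline = 30 * 1000 * 1000,   /* ... due within 30 ms  */
                .sched_period   = 30 * 1000 * 1000,   /* ... once every 30 ms  */
        };

        return syscall(SYS_sched_setattr, 0 /* current thread */, &attr, 0 /* flags */);
}
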
  678. union rcu_special {
  679. struct {
  680. u8 blocked;
  681. u8 need_qs;
  682. u8 exp_need_qs;
  683. u8 pad; /* Otherwise the compiler can store garbage here. */
  684. } b; /* Bits. */
  685. u32 s; /* Set of bits. */
  686. };
  687. struct rcu_node;
  688. enum perf_event_task_context {
  689. perf_invalid_context = -1,
  690. perf_hw_context = 0,
  691. perf_sw_context,
  692. perf_nr_task_contexts,
  693. };
  694. struct wake_q_node {
  695. struct wake_q_node *next;
  696. };
  697. /* Track pages that require TLB flushes */
  698. struct tlbflush_unmap_batch {
  699. /*
  700. * Each bit set is a CPU that potentially has a TLB entry for one of
  701. * the PFNs being flushed. See set_tlb_ubc_flush_pending().
  702. */
  703. struct cpumask cpumask;
  704. /* True if any bit in cpumask is set */
  705. bool flush_required;
  706. /*
  707. * If true then the PTE was dirty when unmapped. The entry must be
  708. * flushed before IO is initiated or a stale TLB entry potentially
  709. * allows an update without redirtying the page.
  710. */
  711. bool writable;
  712. };
  713. struct task_struct {
  714. #ifdef CONFIG_THREAD_INFO_IN_TASK
  715. /*
  716. * For reasons of header soup (see current_thread_info()), this
  717. * must be the first element of task_struct.
  718. */
  719. struct thread_info thread_info;
  720. #endif
  721. volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
  722. void *stack;
  723. atomic_t usage;
  724. unsigned int flags; /* per process flags, defined below */
  725. unsigned int ptrace;
  726. #ifdef CONFIG_SMP
  727. struct llist_node wake_entry;
  728. int on_cpu;
  729. #ifdef CONFIG_THREAD_INFO_IN_TASK
  730. unsigned int cpu; /* current CPU */
  731. #endif
  732. unsigned int wakee_flips;
  733. unsigned long wakee_flip_decay_ts;
  734. struct task_struct *last_wakee;
  735. int wake_cpu;
  736. #endif
  737. int on_rq;
  738. int prio, static_prio, normal_prio;
  739. unsigned int rt_priority;
  740. const struct sched_class *sched_class;
  741. struct sched_entity se;
  742. struct sched_rt_entity rt;
  743. #ifdef CONFIG_CGROUP_SCHED
  744. struct task_group *sched_task_group;
  745. #endif
  746. struct sched_dl_entity dl;
  747. #ifdef CONFIG_PREEMPT_NOTIFIERS
  748. /* list of struct preempt_notifier: */
  749. struct hlist_head preempt_notifiers;
  750. #endif
  751. #ifdef CONFIG_BLK_DEV_IO_TRACE
  752. unsigned int btrace_seq;
  753. #endif
  754. unsigned int policy;
  755. int nr_cpus_allowed;
  756. cpumask_t cpus_allowed;
  757. #ifdef CONFIG_PREEMPT_RCU
  758. int rcu_read_lock_nesting;
  759. union rcu_special rcu_read_unlock_special;
  760. struct list_head rcu_node_entry;
  761. struct rcu_node *rcu_blocked_node;
  762. #endif /* #ifdef CONFIG_PREEMPT_RCU */
  763. #ifdef CONFIG_TASKS_RCU
  764. unsigned long rcu_tasks_nvcsw;
  765. bool rcu_tasks_holdout;
  766. struct list_head rcu_tasks_holdout_list;
  767. int rcu_tasks_idle_cpu;
  768. #endif /* #ifdef CONFIG_TASKS_RCU */
  769. #ifdef CONFIG_SCHED_INFO
  770. struct sched_info sched_info;
  771. #endif
  772. struct list_head tasks;
  773. #ifdef CONFIG_SMP
  774. struct plist_node pushable_tasks;
  775. struct rb_node pushable_dl_tasks;
  776. #endif
  777. struct mm_struct *mm, *active_mm;
  778. /* Per-thread vma caching: */
  779. struct vmacache vmacache;
  780. #if defined(SPLIT_RSS_COUNTING)
  781. struct task_rss_stat rss_stat;
  782. #endif
  783. /* task state */
  784. int exit_state;
  785. int exit_code, exit_signal;
  786. int pdeath_signal; /* The signal sent when the parent dies */
  787. unsigned long jobctl; /* JOBCTL_*, siglock protected */
  788. /* Used for emulating ABI behavior of previous Linux versions */
  789. unsigned int personality;
  790. /* scheduler bits, serialized by scheduler locks */
  791. unsigned sched_reset_on_fork:1;
  792. unsigned sched_contributes_to_load:1;
  793. unsigned sched_migrated:1;
  794. unsigned sched_remote_wakeup:1;
  795. unsigned :0; /* force alignment to the next boundary */
  796. /* unserialized, strictly 'current' */
  797. unsigned in_execve:1; /* bit to tell LSMs we're in execve */
  798. unsigned in_iowait:1;
  799. #if !defined(TIF_RESTORE_SIGMASK)
  800. unsigned restore_sigmask:1;
  801. #endif
  802. #ifdef CONFIG_MEMCG
  803. unsigned memcg_may_oom:1;
  804. #ifndef CONFIG_SLOB
  805. unsigned memcg_kmem_skip_account:1;
  806. #endif
  807. #endif
  808. #ifdef CONFIG_COMPAT_BRK
  809. unsigned brk_randomized:1;
  810. #endif
  811. unsigned long atomic_flags; /* Flags needing atomic access. */
  812. struct restart_block restart_block;
  813. pid_t pid;
  814. pid_t tgid;
  815. #ifdef CONFIG_CC_STACKPROTECTOR
  816. /* Canary value for the -fstack-protector gcc feature */
  817. unsigned long stack_canary;
  818. #endif
  819. /*
  820. * pointers to (original) parent process, youngest child, younger sibling,
  821. * older sibling, respectively. (p->father can be replaced with
  822. * p->real_parent->pid)
  823. */
  824. struct task_struct __rcu *real_parent; /* real parent process */
  825. struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
  826. /*
  827. * children/sibling forms the list of my natural children
  828. */
  829. struct list_head children; /* list of my children */
  830. struct list_head sibling; /* linkage in my parent's children list */
  831. struct task_struct *group_leader; /* threadgroup leader */
  832. /*
  833. * ptraced is the list of tasks this task is using ptrace on.
  834. * This includes both natural children and PTRACE_ATTACH targets.
  835. * p->ptrace_entry is p's link on the p->parent->ptraced list.
  836. */
  837. struct list_head ptraced;
  838. struct list_head ptrace_entry;
  839. /* PID/PID hash table linkage. */
  840. struct pid_link pids[PIDTYPE_MAX];
  841. struct list_head thread_group;
  842. struct list_head thread_node;
  843. struct completion *vfork_done; /* for vfork() */
  844. int __user *set_child_tid; /* CLONE_CHILD_SETTID */
  845. int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
  846. u64 utime, stime;
  847. #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
  848. u64 utimescaled, stimescaled;
  849. #endif
  850. u64 gtime;
  851. struct prev_cputime prev_cputime;
  852. #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
  853. seqcount_t vtime_seqcount;
  854. unsigned long long vtime_snap;
  855. enum {
  856. /* Task is sleeping or running in a CPU with VTIME inactive */
  857. VTIME_INACTIVE = 0,
  858. /* Task runs in userspace in a CPU with VTIME active */
  859. VTIME_USER,
  860. /* Task runs in kernelspace in a CPU with VTIME active */
  861. VTIME_SYS,
  862. } vtime_snap_whence;
  863. #endif
  864. #ifdef CONFIG_NO_HZ_FULL
  865. atomic_t tick_dep_mask;
  866. #endif
  867. unsigned long nvcsw, nivcsw; /* context switch counts */
  868. u64 start_time; /* monotonic time in nsec */
  869. u64 real_start_time; /* boot based time in nsec */
  870. /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
  871. unsigned long min_flt, maj_flt;
  872. #ifdef CONFIG_POSIX_TIMERS
  873. struct task_cputime cputime_expires;
  874. struct list_head cpu_timers[3];
  875. #endif
  876. /* process credentials */
  877. const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
  878. const struct cred __rcu *real_cred; /* objective and real subjective task
  879. * credentials (COW) */
  880. const struct cred __rcu *cred; /* effective (overridable) subjective task
  881. * credentials (COW) */
  882. char comm[TASK_COMM_LEN]; /* executable name excluding path
  883. - access with [gs]et_task_comm (which lock
  884. it with task_lock())
  885. - initialized normally by setup_new_exec */
  886. /* file system info */
  887. struct nameidata *nameidata;
  888. #ifdef CONFIG_SYSVIPC
  889. /* ipc stuff */
  890. struct sysv_sem sysvsem;
  891. struct sysv_shm sysvshm;
  892. #endif
  893. #ifdef CONFIG_DETECT_HUNG_TASK
  894. /* hung task detection */
  895. unsigned long last_switch_count;
  896. #endif
  897. /* filesystem information */
  898. struct fs_struct *fs;
  899. /* open file information */
  900. struct files_struct *files;
  901. /* namespaces */
  902. struct nsproxy *nsproxy;
  903. /* signal handlers */
  904. struct signal_struct *signal;
  905. struct sighand_struct *sighand;
  906. sigset_t blocked, real_blocked;
  907. sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
  908. struct sigpending pending;
  909. unsigned long sas_ss_sp;
  910. size_t sas_ss_size;
  911. unsigned sas_ss_flags;
  912. struct callback_head *task_works;
  913. struct audit_context *audit_context;
  914. #ifdef CONFIG_AUDITSYSCALL
  915. kuid_t loginuid;
  916. unsigned int sessionid;
  917. #endif
  918. struct seccomp seccomp;
  919. /* Thread group tracking */
  920. u32 parent_exec_id;
  921. u32 self_exec_id;
  922. /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
  923. * mempolicy */
  924. spinlock_t alloc_lock;
  925. /* Protection of the PI data structures: */
  926. raw_spinlock_t pi_lock;
  927. struct wake_q_node wake_q;
  928. #ifdef CONFIG_RT_MUTEXES
  929. /* PI waiters blocked on a rt_mutex held by this task */
  930. struct rb_root pi_waiters;
  931. struct rb_node *pi_waiters_leftmost;
  932. /* Deadlock detection and priority inheritance handling */
  933. struct rt_mutex_waiter *pi_blocked_on;
  934. #endif
  935. #ifdef CONFIG_DEBUG_MUTEXES
  936. /* mutex deadlock detection */
  937. struct mutex_waiter *blocked_on;
  938. #endif
  939. #ifdef CONFIG_TRACE_IRQFLAGS
  940. unsigned int irq_events;
  941. unsigned long hardirq_enable_ip;
  942. unsigned long hardirq_disable_ip;
  943. unsigned int hardirq_enable_event;
  944. unsigned int hardirq_disable_event;
  945. int hardirqs_enabled;
  946. int hardirq_context;
  947. unsigned long softirq_disable_ip;
  948. unsigned long softirq_enable_ip;
  949. unsigned int softirq_disable_event;
  950. unsigned int softirq_enable_event;
  951. int softirqs_enabled;
  952. int softirq_context;
  953. #endif
  954. #ifdef CONFIG_LOCKDEP
  955. # define MAX_LOCK_DEPTH 48UL
  956. u64 curr_chain_key;
  957. int lockdep_depth;
  958. unsigned int lockdep_recursion;
  959. struct held_lock held_locks[MAX_LOCK_DEPTH];
  960. gfp_t lockdep_reclaim_gfp;
  961. #endif
  962. #ifdef CONFIG_UBSAN
  963. unsigned int in_ubsan;
  964. #endif
  965. /* journalling filesystem info */
  966. void *journal_info;
  967. /* stacked block device info */
  968. struct bio_list *bio_list;
  969. #ifdef CONFIG_BLOCK
  970. /* stack plugging */
  971. struct blk_plug *plug;
  972. #endif
  973. /* VM state */
  974. struct reclaim_state *reclaim_state;
  975. struct backing_dev_info *backing_dev_info;
  976. struct io_context *io_context;
  977. unsigned long ptrace_message;
  978. siginfo_t *last_siginfo; /* For ptrace use. */
  979. struct task_io_accounting ioac;
  980. #if defined(CONFIG_TASK_XACCT)
  981. u64 acct_rss_mem1; /* accumulated rss usage */
  982. u64 acct_vm_mem1; /* accumulated virtual memory usage */
  983. u64 acct_timexpd; /* stime + utime since last update */
  984. #endif
  985. #ifdef CONFIG_CPUSETS
  986. nodemask_t mems_allowed; /* Protected by alloc_lock */
  987. seqcount_t mems_allowed_seq; /* Sequence number to catch updates */
  988. int cpuset_mem_spread_rotor;
  989. int cpuset_slab_spread_rotor;
  990. #endif
  991. #ifdef CONFIG_CGROUPS
  992. /* Control Group info protected by css_set_lock */
  993. struct css_set __rcu *cgroups;
  994. /* cg_list protected by css_set_lock and tsk->alloc_lock */
  995. struct list_head cg_list;
  996. #endif
  997. #ifdef CONFIG_INTEL_RDT_A
  998. int closid;
  999. #endif
  1000. #ifdef CONFIG_FUTEX
  1001. struct robust_list_head __user *robust_list;
  1002. #ifdef CONFIG_COMPAT
  1003. struct compat_robust_list_head __user *compat_robust_list;
  1004. #endif
  1005. struct list_head pi_state_list;
  1006. struct futex_pi_state *pi_state_cache;
  1007. #endif
  1008. #ifdef CONFIG_PERF_EVENTS
  1009. struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
  1010. struct mutex perf_event_mutex;
  1011. struct list_head perf_event_list;
  1012. #endif
  1013. #ifdef CONFIG_DEBUG_PREEMPT
  1014. unsigned long preempt_disable_ip;
  1015. #endif
  1016. #ifdef CONFIG_NUMA
  1017. struct mempolicy *mempolicy; /* Protected by alloc_lock */
  1018. short il_next;
  1019. short pref_node_fork;
  1020. #endif
  1021. #ifdef CONFIG_NUMA_BALANCING
  1022. int numa_scan_seq;
  1023. unsigned int numa_scan_period;
  1024. unsigned int numa_scan_period_max;
  1025. int numa_preferred_nid;
  1026. unsigned long numa_migrate_retry;
  1027. u64 node_stamp; /* migration stamp */
  1028. u64 last_task_numa_placement;
  1029. u64 last_sum_exec_runtime;
  1030. struct callback_head numa_work;
  1031. struct list_head numa_entry;
  1032. struct numa_group *numa_group;
  1033. /*
  1034. * numa_faults is an array split into four regions:
  1035. * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
  1036. * in this precise order.
  1037. *
  1038. * faults_memory: Exponential decaying average of faults on a per-node
  1039. * basis. Scheduling placement decisions are made based on these
  1040. * counts. The values remain static for the duration of a PTE scan.
  1041. * faults_cpu: Track the nodes the process was running on when a NUMA
  1042. * hinting fault was incurred.
  1043. * faults_memory_buffer and faults_cpu_buffer: Record faults per node
  1044. * during the current scan window. When the scan completes, the counts
  1045. * in faults_memory and faults_cpu decay and these values are copied.
  1046. */
  1047. unsigned long *numa_faults;
  1048. unsigned long total_numa_faults;
  1049. /*
  1050. * numa_faults_locality tracks if faults recorded during the last
  1051. * scan window were remote/local or failed to migrate. The task scan
  1052. * period is adapted based on the locality of the faults with different
  1053. * weights depending on whether they were shared or private faults
  1054. */
  1055. unsigned long numa_faults_locality[3];
  1056. unsigned long numa_pages_migrated;
  1057. #endif /* CONFIG_NUMA_BALANCING */
  1058. #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
  1059. struct tlbflush_unmap_batch tlb_ubc;
  1060. #endif
  1061. struct rcu_head rcu;
  1062. /*
  1063. * cache last used pipe for splice
  1064. */
  1065. struct pipe_inode_info *splice_pipe;
  1066. struct page_frag task_frag;
  1067. #ifdef CONFIG_TASK_DELAY_ACCT
  1068. struct task_delay_info *delays;
  1069. #endif
  1070. #ifdef CONFIG_FAULT_INJECTION
  1071. int make_it_fail;
  1072. #endif
  1073. /*
  1074. * when (nr_dirtied >= nr_dirtied_pause), it's time to call
  1075. * balance_dirty_pages() for some dirty throttling pause
  1076. */
  1077. int nr_dirtied;
  1078. int nr_dirtied_pause;
  1079. unsigned long dirty_paused_when; /* start of a write-and-pause period */
  1080. #ifdef CONFIG_LATENCYTOP
  1081. int latency_record_count;
  1082. struct latency_record latency_record[LT_SAVECOUNT];
  1083. #endif
  1084. /*
  1085. * time slack values; these are used to round up poll() and
  1086. * select() etc timeout values. These are in nanoseconds.
  1087. */
  1088. u64 timer_slack_ns;
  1089. u64 default_timer_slack_ns;
  1090. #ifdef CONFIG_KASAN
  1091. unsigned int kasan_depth;
  1092. #endif
  1093. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  1094. /* Index of current stored address in ret_stack */
  1095. int curr_ret_stack;
  1096. /* Stack of return addresses for return function tracing */
  1097. struct ftrace_ret_stack *ret_stack;
  1098. /* time stamp for last schedule */
  1099. unsigned long long ftrace_timestamp;
  1100. /*
  1101. * Number of functions that haven't been traced
  1102. * because of depth overrun.
  1103. */
  1104. atomic_t trace_overrun;
  1105. /* Pause for the tracing */
  1106. atomic_t tracing_graph_pause;
  1107. #endif
  1108. #ifdef CONFIG_TRACING
  1109. /* state flags for use by tracers */
  1110. unsigned long trace;
  1111. /* bitmask and counter of trace recursion */
  1112. unsigned long trace_recursion;
  1113. #endif /* CONFIG_TRACING */
  1114. #ifdef CONFIG_KCOV
  1115. /* Coverage collection mode enabled for this task (0 if disabled). */
  1116. enum kcov_mode kcov_mode;
  1117. /* Size of the kcov_area. */
  1118. unsigned kcov_size;
  1119. /* Buffer for coverage collection. */
  1120. void *kcov_area;
  1121. /* kcov descriptor wired with this task or NULL. */
  1122. struct kcov *kcov;
  1123. #endif
  1124. #ifdef CONFIG_MEMCG
  1125. struct mem_cgroup *memcg_in_oom;
  1126. gfp_t memcg_oom_gfp_mask;
  1127. int memcg_oom_order;
  1128. /* number of pages to reclaim on returning to userland */
  1129. unsigned int memcg_nr_pages_over_high;
  1130. #endif
  1131. #ifdef CONFIG_UPROBES
  1132. struct uprobe_task *utask;
  1133. #endif
  1134. #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
  1135. unsigned int sequential_io;
  1136. unsigned int sequential_io_avg;
  1137. #endif
  1138. #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  1139. unsigned long task_state_change;
  1140. #endif
  1141. int pagefault_disabled;
  1142. #ifdef CONFIG_MMU
  1143. struct task_struct *oom_reaper_list;
  1144. #endif
  1145. #ifdef CONFIG_VMAP_STACK
  1146. struct vm_struct *stack_vm_area;
  1147. #endif
  1148. #ifdef CONFIG_THREAD_INFO_IN_TASK
  1149. /* A live task holds one reference. */
  1150. atomic_t stack_refcount;
  1151. #endif
  1152. /* CPU-specific state of this task */
  1153. struct thread_struct thread;
  1154. /*
  1155. * WARNING: on x86, 'thread_struct' contains a variable-sized
  1156. * structure. It *MUST* be at the end of 'task_struct'.
  1157. *
  1158. * Do not put anything below here!
  1159. */
  1160. };
  1161. #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
  1162. extern int arch_task_struct_size __read_mostly;
  1163. #else
  1164. # define arch_task_struct_size (sizeof(struct task_struct))
  1165. #endif
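
As a small orientation aid (an editor's sketch, not derived from these lines): kernel code normally reaches this structure through the `current` macro and the accessor helpers declared later in this header, rather than by dereferencing arbitrary task pointers. A minimal example, assuming a context where printing is allowed:

#include <linux/sched.h>
#include <linux/printk.h>

static void show_current_task(void)
{
        /* current is the task_struct of the task executing this code. */
        pr_info("comm=%s pid=%d prio=%d state=%ld\n",
                current->comm, task_pid_nr(current),
                current->prio, current->state);
        /* For tasks other than current, comm should be read via get_task_comm(). */
}
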
  1166. #ifdef CONFIG_VMAP_STACK
  1167. static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
  1168. {
  1169. return t->stack_vm_area;
  1170. }
  1171. #else
  1172. static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
  1173. {
  1174. return NULL;
  1175. }
  1176. #endif
  1177. #define TNF_MIGRATED 0x01
  1178. #define TNF_NO_GROUP 0x02
  1179. #define TNF_SHARED 0x04
  1180. #define TNF_FAULT_LOCAL 0x08
  1181. #define TNF_MIGRATE_FAIL 0x10
  1182. static inline bool in_vfork(struct task_struct *tsk)
  1183. {
  1184. bool ret;
  1185. /*
  1186. * need RCU to access ->real_parent if CLONE_VM was used along with
  1187. * CLONE_PARENT.
  1188. *
  1189. * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
  1190. * imply CLONE_VM
  1191. *
  1192. * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
  1193. * ->real_parent is not necessarily the task doing vfork(), so in
  1194. * theory we can't rely on task_lock() if we want to dereference it.
  1195. *
  1196. * And in this case we can't trust the real_parent->mm == tsk->mm
  1197. * check, it can be false negative. But we do not care, if init or
  1198. * another oom-unkillable task does this it should blame itself.
  1199. */
  1200. rcu_read_lock();
  1201. ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
  1202. rcu_read_unlock();
  1203. return ret;
  1204. }
  1205. #ifdef CONFIG_NUMA_BALANCING
  1206. extern void task_numa_fault(int last_node, int node, int pages, int flags);
  1207. extern pid_t task_numa_group_id(struct task_struct *p);
  1208. extern void set_numabalancing_state(bool enabled);
  1209. extern void task_numa_free(struct task_struct *p);
  1210. extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
  1211. int src_nid, int dst_cpu);
  1212. #else
  1213. static inline void task_numa_fault(int last_node, int node, int pages,
  1214. int flags)
  1215. {
  1216. }
  1217. static inline pid_t task_numa_group_id(struct task_struct *p)
  1218. {
  1219. return 0;
  1220. }
  1221. static inline void set_numabalancing_state(bool enabled)
  1222. {
  1223. }
  1224. static inline void task_numa_free(struct task_struct *p)
  1225. {
  1226. }
  1227. static inline bool should_numa_migrate_memory(struct task_struct *p,
  1228. struct page *page, int src_nid, int dst_cpu)
  1229. {
  1230. return true;
  1231. }
  1232. #endif
  1233. static inline struct pid *task_pid(struct task_struct *task)
  1234. {
  1235. return task->pids[PIDTYPE_PID].pid;
  1236. }
  1237. static inline struct pid *task_tgid(struct task_struct *task)
  1238. {
  1239. return task->group_leader->pids[PIDTYPE_PID].pid;
  1240. }
  1241. /*
  1242. * Without tasklist or rcu lock it is not safe to dereference
  1243. * the result of task_pgrp/task_session even if task == current,
  1244. * we can race with another thread doing sys_setsid/sys_setpgid.
  1245. */
  1246. static inline struct pid *task_pgrp(struct task_struct *task)
  1247. {
  1248. return task->group_leader->pids[PIDTYPE_PGID].pid;
  1249. }
  1250. static inline struct pid *task_session(struct task_struct *task)
  1251. {
  1252. return task->group_leader->pids[PIDTYPE_SID].pid;
  1253. }
  1254. struct pid_namespace;
  1255. /*
  1256. * the helpers to get the task's different pids as they are seen
  1257. * from various namespaces
  1258. *
  1259. * task_xid_nr() : global id, i.e. the id seen from the init namespace;
  1260. * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
  1261. * current.
  1262. * task_xid_nr_ns() : id seen from the ns specified;
  1263. *
  1264. * set_task_vxid() : assigns a virtual id to a task;
  1265. *
  1266. * see also pid_nr() etc in include/linux/pid.h
  1267. */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
                        struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
        return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
                                   struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
        return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
        return pid_vnr(task_tgid(tsk));
}

static inline int pid_alive(const struct task_struct *p);

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
        pid_t pid = 0;

        rcu_read_lock();
        if (pid_alive(tsk))
                pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
        rcu_read_unlock();

        return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
        return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
                                    struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
                                       struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
        return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
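
/*
 * Illustrative sketch (not part of the header): how the _nr/_vnr/_nr_ns
 * variants relate for one task.  Assumes a hypothetical context where
 * tsk is a valid task that is pinned by a reference or a suitable lock.
 *
 *      pid_t global = task_pid_nr(tsk);        // id in the init namespace
 *      pid_t virt   = task_pid_vnr(tsk);       // id seen from current's namespace
 *      pid_t in_ns  = task_pid_nr_ns(tsk, task_active_pid_ns(current));
 *      // in_ns == virt when both are resolved against the same namespace
 */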
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
        return p->pids[PIDTYPE_PID].pid != NULL;
}
/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
        return task_tgid_nr(tsk) == 1;
}
extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
        if (atomic_dec_and_test(&t->usage))
                __put_task_struct(t);
}
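
/*
 * Illustrative sketch: the usual reference pattern when a task pointer
 * must stay valid across a sleep or after dropping the lock that found
 * it.  do_something_with() is a hypothetical helper.
 *
 *      get_task_struct(tsk);           // pin the task while still protected
 *      ...                             // drop the lock, possibly sleep
 *      do_something_with(tsk);
 *      put_task_struct(tsk);           // may free the task on the last reference
 */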
struct task_struct *task_rcu_dereference(struct task_struct **ptask);
struct task_struct *try_get_task_struct(struct task_struct **ptask);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
                         u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
                                u64 *utime, u64 *stime)
{
        *utime = t->utime;
        *stime = t->stime;
}

static inline u64 task_gtime(struct task_struct *t)
{
        return t->gtime;
}
#endif

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
                                       u64 *utimescaled,
                                       u64 *stimescaled)
{
        *utimescaled = t->utimescaled;
        *stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
                                       u64 *utimescaled,
                                       u64 *stimescaled)
{
        task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
/*
 * Per process flags
 */
#define PF_IDLE           0x00000002  /* I am an IDLE thread */
#define PF_EXITING        0x00000004  /* getting shut down */
#define PF_EXITPIDONE     0x00000008  /* pi exit done on shut down */
#define PF_VCPU           0x00000010  /* I'm a virtual CPU */
#define PF_WQ_WORKER      0x00000020  /* I'm a workqueue worker */
#define PF_FORKNOEXEC     0x00000040  /* forked but didn't exec */
#define PF_MCE_PROCESS    0x00000080  /* process policy on mce errors */
#define PF_SUPERPRIV      0x00000100  /* used super-user privileges */
#define PF_DUMPCORE       0x00000200  /* dumped core */
#define PF_SIGNALED       0x00000400  /* killed by a signal */
#define PF_MEMALLOC       0x00000800  /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000  /* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH      0x00002000  /* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC     0x00004000  /* used async_schedule*(), used by module init */
#define PF_NOFREEZE       0x00008000  /* this thread should not be frozen */
#define PF_FROZEN         0x00010000  /* frozen for system suspend */
#define PF_FSTRANS        0x00020000  /* inside a filesystem transaction */
#define PF_KSWAPD         0x00040000  /* I am kswapd */
#define PF_MEMALLOC_NOIO  0x00080000  /* Allocating memory without IO involved */
#define PF_LESS_THROTTLE  0x00100000  /* Throttle me less: I clean memory */
#define PF_KTHREAD        0x00200000  /* I am a kernel thread */
#define PF_RANDOMIZE      0x00400000  /* randomize virtual address space */
#define PF_SWAPWRITE      0x00800000  /* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000  /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY      0x08000000  /* Early kill for mce process policy */
#define PF_MUTEX_TESTER   0x20000000  /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP   0x40000000  /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK   0x80000000  /* this thread called freeze_processes and should not be frozen */
/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition) \
        conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
 * __GFP_FS is also cleared as it implies __GFP_IO.
 */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
        if (unlikely(current->flags & PF_MEMALLOC_NOIO))
                flags &= ~(__GFP_IO | __GFP_FS);
        return flags;
}

static inline unsigned int memalloc_noio_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
        current->flags |= PF_MEMALLOC_NOIO;
        return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
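
/*
 * Illustrative sketch of the PF_MEMALLOC_NOIO scope pattern: allocations
 * made between save and restore have __GFP_IO/__GFP_FS stripped by
 * memalloc_noio_flags() in the allocator paths, so recursion into I/O is
 * avoided.  buf and size are hypothetical.
 *
 *      unsigned int noio_flags = memalloc_noio_save();
 *      buf = kmalloc(size, GFP_KERNEL);        // behaves like GFP_NOIO here
 *      memalloc_noio_restore(noio_flags);
 */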
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0      /* May not gain new privileges. */
#define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING  3      /* Lowmemorykiller is waiting */

#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func) \
static inline void task_set_##func(struct task_struct *p) \
{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func) \
static inline void task_clear_##func(struct task_struct *p) \
{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)
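
/*
 * The TASK_PFA_* macros above expand into inline helpers, e.g. the
 * NO_NEW_PRIVS pair generates task_no_new_privs() and
 * task_set_no_new_privs().  Illustrative use (sketch only):
 *
 *      if (!task_no_new_privs(current))
 *              task_set_no_new_privs(current); // one-way: no clear helper is generated
 */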
/*
 * task->jobctl flags
 */
#define JOBCTL_STOP_SIGMASK     0xffff  /* signr of the last group stop */

#define JOBCTL_STOP_DEQUEUED_BIT 16     /* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT  17     /* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT  18     /* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT     19     /* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT   20     /* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT      21     /* switching to TRACED */
#define JOBCTL_LISTENING_BIT     22     /* ptracer is listening for events */

#define JOBCTL_STOP_DEQUEUED    (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING     (1UL << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME     (1UL << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP        (1UL << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY      (1UL << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING         (1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING        (1UL << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK        (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK     (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

extern bool task_set_jobctl_pending(struct task_struct *task,
                                    unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
                                      unsigned long mask);
static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
        p->rcu_read_lock_nesting = 0;
        p->rcu_read_unlock_special.s = 0;
        p->rcu_blocked_node = NULL;
        INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
        p->rcu_tasks_holdout = false;
        INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
        p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}

static inline void tsk_restore_flags(struct task_struct *task,
                                     unsigned long orig_flags, unsigned long flags)
{
        task->flags &= ~flags;
        task->flags |= orig_flags & flags;
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
                                     const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
                           const struct cpumask *cs_cpus_allowed);

#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
                                const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p,
                                const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
                                       const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
                                       const struct cpumask *new_mask)
{
        if (!cpumask_test_cpu(0, new_mask))
                return -EINVAL;
        return 0;
}
#endif
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
#endif

#ifdef CONFIG_NO_HZ_FULL
extern u64 scheduler_tick_max_deferment(void);
#endif
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
        return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
                              const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
                                      const struct sched_param *);
extern int sched_setattr(struct task_struct *,
                         const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
        return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);
union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
        struct thread_info thread_info;
#endif
        unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
        /* Reliable end of stack detection:
         * Some APM bios versions misalign the stack
         */
        return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

extern struct pid_namespace init_pid_ns;
/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
                                               struct pid_namespace *ns);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
        atomic_inc(&u->__count);
        return u;
}
extern void free_uid(struct user_struct *);
#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);

extern void release_task(struct task_struct * p);

#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
                           struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
                       struct task_struct *);

/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
                unsigned long clone_flags, unsigned long sp, unsigned long arg,
                struct task_struct *p, unsigned long tls)
{
        return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

extern void do_group_exit(int);

extern int do_execve(struct filename *,
                     const char __user * const __user *,
                     const char __user * const __user *);
extern int do_execveat(int, struct filename *,
                       const char __user * const __user *,
                       const char __user * const __user *,
                       int);
extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
        __set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
                                               long match_state)
{
        return 1;
}
#endif
/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4(). Also used in procfs. Also
 * pins the final release of task.io_context. Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
        spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
        spin_unlock(&p->alloc_lock);
}
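
/*
 * Illustrative sketch: reading an alloc_lock-protected field, such as
 * ->comm, from another task.  buf is a hypothetical local buffer;
 * get_task_comm() wraps essentially this pattern.
 *
 *      char buf[TASK_COMM_LEN];
 *
 *      task_lock(p);
 *      strncpy(buf, p->comm, sizeof(buf));
 *      task_unlock(p);
 */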
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
        return &task->thread_info;
}

/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead. task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static inline void *task_stack_page(const struct task_struct *task)
{
        return task->stack;
}

#define setup_thread_stack(new,old)     do { } while(0)

static inline unsigned long *end_of_stack(const struct task_struct *task)
{
        return task->stack;
}

#elif !defined(__HAVE_THREAD_FUNCTIONS)

#define task_thread_info(task)  ((struct thread_info *)(task)->stack)
#define task_stack_page(task)   ((void *)(task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
        *task_thread_info(p) = *task_thread_info(org);
        task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
        return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
        return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
        return atomic_inc_not_zero(&tsk->stack_refcount) ?
                task_stack_page(tsk) : NULL;
}

extern void put_task_stack(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
        return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
#endif
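
/*
 * Illustrative sketch: inspecting another task's stack safely.  With
 * CONFIG_THREAD_INFO_IN_TASK the stack page is pinned while in use and
 * released afterwards; on other configurations the calls degrade to the
 * plain accessors.  dump_range() is hypothetical.
 *
 *      void *stack = try_get_task_stack(tsk);
 *      if (stack) {
 *              dump_range(stack, THREAD_SIZE);
 *              put_task_stack(tsk);
 *      }
 */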
#define task_stack_end_corrupted(task) \
                (*(end_of_stack(task)) != STACK_END_MAGIC)

static inline int object_is_on_stack(void *obj)
{
        void *stack = task_stack_page(current);

        return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_stack_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
        unsigned long *n = end_of_stack(p);

        do {    /* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
                n--;
# else
                n++;
# endif
        } while (!*n);

# ifdef CONFIG_STACK_GROWSUP
        return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
        return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
        clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
        return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}
static inline int restart_syscall(void)
{
        set_tsk_thread_flag(current, TIF_SIGPENDING);
        return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
        return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
        return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
        return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                return 0;
        if (!signal_pending(p))
                return 0;

        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
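
/*
 * Illustrative sketch: the interruptible-wait shape these helpers are
 * built for.  done is a hypothetical wake-up condition, ret a local.
 *
 *      for (;;) {
 *              set_current_state(TASK_INTERRUPTIBLE);
 *              if (done)
 *                      break;
 *              if (signal_pending(current)) {
 *                      ret = -ERESTARTSYS;
 *                      break;
 *              }
 *              schedule();
 *      }
 *      __set_current_state(TASK_RUNNING);
 */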
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({                       \
        ___might_sleep(__FILE__, __LINE__, 0);  \
        _cond_resched();                        \
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({                              \
        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
        __cond_resched_lock(lock);                              \
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({                                       \
        ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);     \
        __cond_resched_softirq();                                       \
})
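
/*
 * Illustrative sketch: breaking up a long loop so other tasks can run.
 * process_item() and the item list are hypothetical.
 *
 *      list_for_each_entry(item, &items, node) {
 *              process_item(item);
 *              cond_resched();         // reschedule here if needed
 *      }
 */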
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
        rcu_read_unlock();
        cond_resched();
        rcu_read_lock();
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
        return spin_is_contended(lock);
#else
        return 0;
#endif
}

static __always_inline bool need_resched(void)
{
        return unlikely(tif_need_resched());
}
/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
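
/*
 * Illustrative sketch: changing the blocked mask and recalculating,
 * all under sighand->siglock (roughly what the signal code does
 * internally when the blocked sigset_t changes).  newset is a
 * hypothetical sigset_t.
 *
 *      spin_lock_irq(&current->sighand->siglock);
 *      current->blocked = newset;
 *      recalc_sigpending();
 *      spin_unlock_irq(&current->sighand->siglock);
 */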
static inline void signal_wake_up(struct task_struct *t, bool resume)
{
        signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
        signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
        return p->cpu;
#else
        return task_thread_info(p)->cpu;
#endif
}

static inline int task_node(const struct task_struct *p)
{
        return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
        return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */
/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu) false
#endif
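
/*
 * Illustrative sketch: an optimistic spin that stops early when the
 * lock owner's vCPU has been preempted by the hypervisor.  try_acquire()
 * and owner_cpu are hypothetical.
 *
 *      while (!try_acquire(lock)) {
 *              if (need_resched() || vcpu_is_preempted(owner_cpu))
 *                      break;          // give up spinning and block instead
 *              cpu_relax();
 *      }
 */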
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
                                struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
        tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
        tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
        tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
        tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)       TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#define SCHED_CPUFREQ_RT        (1U << 0)
#define SCHED_CPUFREQ_DL        (1U << 1)
#define SCHED_CPUFREQ_IOWAIT    (1U << 2)

#define SCHED_CPUFREQ_RT_DL     (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)

#ifdef CONFIG_CPU_FREQ
struct update_util_data {
        void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};

void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
                       void (*func)(struct update_util_data *data, u64 time,
                                    unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
#endif /* CONFIG_CPU_FREQ */

#endif