#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern long calc_load_fold_active(struct rq *this_rq);
extern void update_cpu_load_active(struct rq *this_rq);

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
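/*
 * Worked example (editor's sketch, not in the original header): with
 * HZ == 1000 a jiffy is 1ms, i.e. 10^6 ns, so:
 *
 *	NS_TO_JIFFIES(2000000) == 2
 *	NS_TO_JIFFIES(999999)  == 0	(sub-jiffy intervals truncate to 0)
 */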
/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup) and deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
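/*
 * Worked example (editor's sketch): with the increased resolution enabled
 * (SCHED_LOAD_RESOLUTION == 10), a user-visible nice-0 weight of 1024 is
 * carried internally with 10 extra bits of precision:
 *
 *	scale_load(1024)         == 1024 << 10 == 1048576
 *	scale_load_down(1048576) == 1024
 *
 * With the resolution compiled out (the default above), both macros are
 * identity operations and NICE_0_LOAD == 1024.
 */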
/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

static inline bool dl_time_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}
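/*
 * Example (editor's sketch): the signed subtraction makes the comparison
 * safe across u64 wraparound. For a deadline just before the wrap and one
 * just after it:
 *
 *	dl_time_before(ULLONG_MAX - 5, 5) == true
 *
 * because (u64)((ULLONG_MAX - 5) - 5) wraps to ULLONG_MAX - 10, which as
 * an s64 is -11 < 0, so the pre-wrap deadline is still ordered before the
 * post-wrap one.
 */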
/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 * - store the maximum -deadline bandwidth of the system (the group);
 * - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 * - the dl_total_bw array contains, in the i-th element, the currently
 *   allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It can, in turn, be changed by writing to its own control
 * file.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
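/*
 * Worked example (editor's sketch; the bandwidth units are illustrative):
 * suppose each CPU may hand at most 95% of its time to -deadline tasks,
 * encoded as dl_b->bw == 950 out of 1000, on a 4-CPU root domain already
 * carrying total_bw == 3600. Admitting a new task with new_bw == 300
 * (old_bw == 0) is checked as:
 *
 *	950 * 4 == 3800  <  3600 - 0 + 300 == 3900
 *
 * so __dl_overflow() returns true and admission control rejects the task.
 * A dl_b->bw of -1 means "no limit" and always admits.
 */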
extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	atomic_long_t load_avg;
	atomic_t runnable_avg;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so an entity's weight should not be too large, and
 * neither should a task group's shares value.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif
typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
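/*
 * Usage sketch (editor's example; count_tg() is hypothetical and not part
 * of this file): @down runs pre-order and @up runs post-order, and
 * tg_nop() can stand in for whichever direction is not needed. Counting
 * every group in the hierarchy could look like:
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &n);
 *	rcu_read_unlock();
 *
 * A non-zero return from a visitor aborts the walk and is propagated to
 * the caller.
 */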
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
#endif

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */
/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS Load tracking
	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
	 * This allows for the description of both thread and group usage (in
	 * the FAIR_GROUP_SCHED case).
	 * runnable_load_avg is the sum of the load_avg_contrib of the
	 * sched_entities on the rq.
	 * blocked_load_avg is similar to runnable_load_avg except that it
	 * sums the load_avg_contrib of the blocked sched_entities on the rq.
	 * utilization_load_avg is the sum of the average running time of the
	 * sched_entities on the rq.
	 */
	unsigned long runnable_load_avg, blocked_load_avg, utilization_load_avg;
	atomic64_t decay_counter;
	u64 last_decay;
	atomic_long_t removed_load;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* Required to track per-cpu representation of a task_group */
	u32 tg_runnable_contrib;
	unsigned long tg_load_contrib;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a cpu.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};
static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};
#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if that CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must order its lock
 * acquisitions by ascending runqueue address.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ_COMMON
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;

	struct sched_avg avg;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and, if it gets migrated afterwards, may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within an RCU read-side critical section */
	struct cpuidle_state *idle_state;
#endif
};
static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return ACCESS_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}
#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
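/*
 * Example (editor's sketch): these cached pointers are derived from the
 * helpers above when the domain tree is rebuilt; e.g. the last-level-cache
 * domain is the highest domain whose CPUs still share cache resources:
 *
 *	struct sched_domain *sd;
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 */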
struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity, capacity_orig;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"
#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
	p->wake_cpu = cpu;
#endif
}
/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

#ifdef CONFIG_NUMA_BALANCING
#define sched_feat_numa(x) sched_feat(x)
#ifdef CONFIG_SCHED_DEBUG
#define numabalancing_enabled sched_feat_numa(NUMA)
#else
extern bool numabalancing_enabled;
#endif /* CONFIG_SCHED_DEBUG */
#else
#define sched_feat_numa(x) (0)
#define numabalancing_enabled (0)
#endif /* CONFIG_NUMA_BALANCING */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
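/*
 * Worked example (editor's sketch): with the usual defaults of
 * sched_rt_period_us == 1000000 and sched_rt_runtime_us == 950000,
 * these convert to nanoseconds as:
 *
 *	global_rt_period()  == 1000000 * 1000 == 1000000000ns (1s)
 *	global_rt_runtime() ==  950000 * 1000 ==  950000000ns (0.95s)
 *
 * i.e. RT tasks may consume at most 0.95s of every 1s, leaving 5% for
 * non-RT tasks. Writing -1 to sched_rt_runtime_us yields RUNTIME_INF
 * (no throttling).
 */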
static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}
/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */
#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
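/*
 * Worked example (editor's sketch): each table entry is ~1.25x its right
 * neighbour, which is where the "10% effect" comes from. Two CPU-bound
 * tasks at nice 0 and nice 1 share the CPU in proportion to their
 * weights:
 *
 *	nice 0: 1024 / (1024 + 820) ~= 55.5%
 *	nice 1:  820 / (1024 + 820) ~= 44.5%
 *
 * a relative distance of ~25% between the two, i.e. roughly +-10% each
 * around an even split.
 */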
/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
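/*
 * Worked example (editor's sketch): for nice 0, weight == 1024 and
 * 2^32 / 1024 == 4194304, which is exactly the table entry above. A
 * division by the weight can then be done as a multiply and shift:
 *
 *	x / 1024  ==  (x * 4194304) >> 32
 *
 * which is how the fair class scales a delta by an inverse weight
 * without a hardware divide.
 */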
#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif
#define ENQUEUE_REPLENISH	8

#define DEQUEUE_SLEEP		1

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);

	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
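/*
 * Example (editor's sketch): the classes form a singly linked list in
 * strict priority order, stop > dl > rt > fair > idle, and the core
 * scheduler picks the first class with a runnable task:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev);
 *		if (p)
 *			return p;
 *	}
 *
 * (RETRY_TASK handling omitted; see pick_next_task() in core.c.)
 */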
#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void idle_enter_fair(struct rq *this_rq);
extern void idle_exit_fair(struct rq *this_rq);

#else

static inline void idle_enter_fair(struct rq *rq) { }
static inline void idle_exit_fair(struct rq *rq) { }

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);

unsigned long to_ratio(u64 period, u64 runtime);

extern void update_idle_cpu_load(struct rq *this_rq);

extern void init_task_runnable_average(struct task_struct *p);
static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif

#ifdef CONFIG_NO_HZ_FULL
		if (tick_nohz_full_cpu(rq->cpu)) {
			/*
			 * Tick is needed if more than one task runs on a CPU.
			 * Send the target an IPI to kick it out of nohz mode.
			 *
			 * We assume that IPI implies full memory barrier and the
			 * new value of rq->nr_running is visible on reception
			 * from the target.
			 */
			tick_nohz_full_kick_cpu(rq->cpu);
		}
#endif
	}
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}
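/*
 * Worked example (editor's sketch): sysctl_sched_time_avg defaults to
 * 1000 (ms), so:
 *
 *	sched_avg_period() == 1000 * 1000000 / 2 == 500000000ns (0.5s)
 *
 * i.e. rq->rt_avg is halved every 0.5s by sched_avg_update() below.
 */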
#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
			return rq;
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *					ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old cpu in task_rq_lock, the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new cpu in task_rq_lock, the acquire will
		 * pair with the WMB to ensure we must then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

static inline void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
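/*
 * Example (editor's sketch): the address comparison gives all callers a
 * single global lock order, which rules out ABBA deadlock. If CPU0 calls
 * double_lock(a, b) while CPU1 calls double_lock(b, a), both normalize
 * to taking min(a, b) first, so one of them simply waits instead of
 * each holding the lock the other needs.
 */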
/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif
extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
extern void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

DECLARE_PER_CPU(u64, cpu_hardirq_time);
DECLARE_PER_CPU(u64, cpu_softirq_time);

#ifndef CONFIG_64BIT
DECLARE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
	__this_cpu_inc(irq_time_seq.sequence);
	smp_wmb();
}

static inline void irq_time_write_end(void)
{
	smp_wmb();
	__this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
	u64 irq_time;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return irq_time;
}
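/*
 * Note (editor's addition): the seqcount above exists because a 32-bit
 * CPU cannot read a u64 atomically; without it, a reader could observe
 * a torn value made of halves from two different updates. The retry
 * loop re-reads whenever the sequence changed (or was odd, i.e. a write
 * was in progress) during the read. On 64-bit the plain reads below are
 * already atomic, so the seqcount is compiled out.
 */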
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */