deadline.c

/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
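/*
 * For context: a task obtains such a reservation from userspace through
 * sched_setattr(), passing runtime, deadline and period in nanoseconds
 * (with runtime <= deadline <= period). A minimal, illustrative sketch --
 * the field names follow the sched_setattr(2) ABI, and the 10ms/30ms/100ms
 * values are just an example:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	=  30 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 *
 * Provided the admission test passes, such a task is then guaranteed (and
 * confined to) 10ms of CPU time in every 100ms period.
 */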
  17. #include "sched.h"
  18. #include <linux/slab.h>
  19. struct dl_bandwidth def_dl_bandwidth;
  20. static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  21. {
  22. return container_of(dl_se, struct task_struct, dl);
  23. }
  24. static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  25. {
  26. return container_of(dl_rq, struct rq, dl);
  27. }
  28. static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  29. {
  30. struct task_struct *p = dl_task_of(dl_se);
  31. struct rq *rq = task_rq(p);
  32. return &rq->dl;
  33. }
  34. static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  35. {
  36. return !RB_EMPTY_NODE(&dl_se->rb_node);
  37. }
  38. static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
  39. {
  40. struct sched_dl_entity *dl_se = &p->dl;
  41. return dl_rq->rb_leftmost == &dl_se->rb_node;
  42. }
  43. void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
  44. {
  45. raw_spin_lock_init(&dl_b->dl_runtime_lock);
  46. dl_b->dl_period = period;
  47. dl_b->dl_runtime = runtime;
  48. }
  49. void init_dl_bw(struct dl_bw *dl_b)
  50. {
  51. raw_spin_lock_init(&dl_b->lock);
  52. raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
  53. if (global_rt_runtime() == RUNTIME_INF)
  54. dl_b->bw = -1;
  55. else
  56. dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
  57. raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
  58. dl_b->total_bw = 0;
  59. }
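/*
 * The bandwidth computed above is a fixed-point utilization: assuming
 * to_ratio() keeps its usual definition of (runtime << 20) / period, the
 * default rt limits of runtime = 950000us over a period of 1000000us give
 *
 *	dl_b->bw = (950000000 << 20) / 1000000000 = 996147 (~0.95 * 2^20),
 *
 * i.e. -deadline tasks may reserve at most 95% of each CPU by default.
 */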
  60. void init_dl_rq(struct dl_rq *dl_rq)
  61. {
  62. dl_rq->rb_root = RB_ROOT;
  63. #ifdef CONFIG_SMP
  64. /* zero means no -deadline tasks */
  65. dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
  66. dl_rq->dl_nr_migratory = 0;
  67. dl_rq->overloaded = 0;
  68. dl_rq->pushable_dl_tasks_root = RB_ROOT;
  69. #else
  70. init_dl_bw(&dl_rq->dl_bw);
  71. #endif
  72. }
  73. #ifdef CONFIG_SMP
  74. static inline int dl_overloaded(struct rq *rq)
  75. {
  76. return atomic_read(&rq->rd->dlo_count);
  77. }
  78. static inline void dl_set_overload(struct rq *rq)
  79. {
  80. if (!rq->online)
  81. return;
  82. cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
  83. /*
  84. * Must be visible before the overload count is
  85. * set (as in sched_rt.c).
  86. *
  87. * Matched by the barrier in pull_dl_task().
  88. */
  89. smp_wmb();
  90. atomic_inc(&rq->rd->dlo_count);
  91. }
  92. static inline void dl_clear_overload(struct rq *rq)
  93. {
  94. if (!rq->online)
  95. return;
  96. atomic_dec(&rq->rd->dlo_count);
  97. cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
  98. }
  99. static void update_dl_migration(struct dl_rq *dl_rq)
  100. {
  101. if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
  102. if (!dl_rq->overloaded) {
  103. dl_set_overload(rq_of_dl_rq(dl_rq));
  104. dl_rq->overloaded = 1;
  105. }
  106. } else if (dl_rq->overloaded) {
  107. dl_clear_overload(rq_of_dl_rq(dl_rq));
  108. dl_rq->overloaded = 0;
  109. }
  110. }
  111. static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
  112. {
  113. struct task_struct *p = dl_task_of(dl_se);
  114. if (p->nr_cpus_allowed > 1)
  115. dl_rq->dl_nr_migratory++;
  116. update_dl_migration(dl_rq);
  117. }
  118. static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
  119. {
  120. struct task_struct *p = dl_task_of(dl_se);
  121. if (p->nr_cpus_allowed > 1)
  122. dl_rq->dl_nr_migratory--;
  123. update_dl_migration(dl_rq);
  124. }
/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
 */
  129. static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
  130. {
  131. struct dl_rq *dl_rq = &rq->dl;
  132. struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
  133. struct rb_node *parent = NULL;
  134. struct task_struct *entry;
  135. int leftmost = 1;
  136. BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
  137. while (*link) {
  138. parent = *link;
  139. entry = rb_entry(parent, struct task_struct,
  140. pushable_dl_tasks);
  141. if (dl_entity_preempt(&p->dl, &entry->dl))
  142. link = &parent->rb_left;
  143. else {
  144. link = &parent->rb_right;
  145. leftmost = 0;
  146. }
  147. }
  148. if (leftmost)
  149. dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
  150. rb_link_node(&p->pushable_dl_tasks, parent, link);
  151. rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
  152. }
  153. static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
  154. {
  155. struct dl_rq *dl_rq = &rq->dl;
  156. if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
  157. return;
  158. if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
  159. struct rb_node *next_node;
  160. next_node = rb_next(&p->pushable_dl_tasks);
  161. dl_rq->pushable_dl_tasks_leftmost = next_node;
  162. }
  163. rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
  164. RB_CLEAR_NODE(&p->pushable_dl_tasks);
  165. }
  166. static inline int has_pushable_dl_tasks(struct rq *rq)
  167. {
  168. return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
  169. }
  170. static int push_dl_task(struct rq *rq);
  171. static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
  172. {
  173. return dl_task(prev);
  174. }
  175. static inline void set_post_schedule(struct rq *rq)
  176. {
  177. rq->post_schedule = has_pushable_dl_tasks(rq);
  178. }
  179. #else
  180. static inline
  181. void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
  182. {
  183. }
  184. static inline
  185. void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
  186. {
  187. }
  188. static inline
  189. void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
  190. {
  191. }
  192. static inline
  193. void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
  194. {
  195. }
  196. static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
  197. {
  198. return false;
  199. }
  200. static inline int pull_dl_task(struct rq *rq)
  201. {
  202. return 0;
  203. }
  204. static inline void set_post_schedule(struct rq *rq)
  205. {
  206. }
  207. #endif /* CONFIG_SMP */
  208. static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
  209. static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
  210. static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
  211. int flags);
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's,
 * and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
  224. static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
  225. struct sched_dl_entity *pi_se)
  226. {
  227. struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
  228. struct rq *rq = rq_of_dl_rq(dl_rq);
  229. WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
  230. /*
  231. * We use the regular wall clock time to set deadlines in the
  232. * future; in fact, we must consider execution overheads (time
  233. * spent on hardirq context, etc.).
  234. */
  235. dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
  236. dl_se->runtime = pi_se->dl_runtime;
  237. dl_se->dl_new = 0;
  238. }
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity that is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with the other entities in the
 * system and can't make them miss their deadlines. Reasons why this kind of
 * overrun could happen are, typically, an entity voluntarily trying to
 * exceed its runtime, or having underestimated it in sched_setattr().
 */
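/*
 * As a concrete illustration of that rule: take a reservation with
 * dl_runtime = 10ms and dl_period = 100ms whose entity overran and now
 * sits at runtime = -15ms. The replenishment loop below runs twice,
 *
 *	runtime:  -15ms -> -5ms -> +5ms
 *	deadline:     d -> d + 100ms -> d + 200ms
 *
 * so the entity gets a positive budget again, but only together with a
 * deadline far enough in the future that its long-term bandwidth still
 * works out to 10ms every 100ms.
 */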
  257. static void replenish_dl_entity(struct sched_dl_entity *dl_se,
  258. struct sched_dl_entity *pi_se)
  259. {
  260. struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
  261. struct rq *rq = rq_of_dl_rq(dl_rq);
  262. BUG_ON(pi_se->dl_runtime <= 0);
  263. /*
  264. * This could be the case for a !-dl task that is boosted.
  265. * Just go with full inherited parameters.
  266. */
  267. if (dl_se->dl_deadline == 0) {
  268. dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
  269. dl_se->runtime = pi_se->dl_runtime;
  270. }
/*
 * We keep moving the deadline away until we get some
 * available runtime for the entity. This ensures correct
 * handling of situations where the runtime overrun is
 * arbitrarily large.
 */
  277. while (dl_se->runtime <= 0) {
  278. dl_se->deadline += pi_se->dl_period;
  279. dl_se->runtime += pi_se->dl_runtime;
  280. }
/*
 * At this point, the deadline really should be "in
 * the future" with respect to rq->clock. If it's
 * not, we are, for some reason, lagging too much!
 * Anyway, after having warned userspace about that,
 * we still try to keep things running by
 * resetting the deadline and the budget of the
 * entity.
 */
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
printk_deferred_once("sched: DL replenish lagged too much\n");
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
  295. if (dl_se->dl_yielded)
  296. dl_se->dl_yielded = 0;
  297. if (dl_se->dl_throttled)
  298. dl_se->dl_throttled = 0;
  299. }
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *	runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
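/*
 * For instance, a task granted dl_runtime = 10ms every dl_period = 100ms
 * that wakes up with 4ms of runtime left and 30ms to its current absolute
 * deadline would need a residual bandwidth of 4/30 (~13%), more than the
 * 10/100 (10%) it was admitted with. Cross-multiplying as the code below
 * does, 4 * 100 > 10 * 30, so the function reports an overflow and the
 * caller refills the runtime and moves the deadline a full relative
 * deadline into the future instead of recycling the old parameters.
 */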
  324. static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
  325. struct sched_dl_entity *pi_se, u64 t)
  326. {
  327. u64 left, right;
  328. /*
  329. * left and right are the two sides of the equation above,
  330. * after a bit of shuffling to use multiplications instead
  331. * of divisions.
  332. *
  333. * Note that none of the time values involved in the two
  334. * multiplications are absolute: dl_deadline and dl_runtime
  335. * are the relative deadline and the maximum runtime of each
  336. * instance, runtime is the runtime left for the last instance
  337. * and (deadline - t), since t is rq->clock, is the time left
  338. * to the (absolute) deadline. Even if overflowing the u64 type
  339. * is very unlikely to occur in both cases, here we scale down
  340. * as we want to avoid that risk at all. Scaling down by 10
  341. * means that we reduce granularity to 1us. We are fine with it,
  342. * since this is only a true/false check and, anyway, thinking
  343. * of anything below microseconds resolution is actually fiction
  344. * (but still we want to give the user that illusion >;).
  345. */
  346. left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
  347. right = ((dl_se->deadline - t) >> DL_SCALE) *
  348. (pi_se->dl_runtime >> DL_SCALE);
  349. return dl_time_before(right, left);
  350. }
  351. /*
  352. * When a -deadline entity is queued back on the runqueue, its runtime and
  353. * deadline might need updating.
  354. *
  355. * The policy here is that we update the deadline of the entity only if:
  356. * - the current deadline is in the past,
  357. * - using the remaining runtime with the current deadline would make
  358. * the entity exceed its bandwidth.
  359. */
  360. static void update_dl_entity(struct sched_dl_entity *dl_se,
  361. struct sched_dl_entity *pi_se)
  362. {
  363. struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
  364. struct rq *rq = rq_of_dl_rq(dl_rq);
  365. /*
  366. * The arrival of a new instance needs special treatment, i.e.,
  367. * the actual scheduling parameters have to be "renewed".
  368. */
  369. if (dl_se->dl_new) {
  370. setup_new_dl_entity(dl_se, pi_se);
  371. return;
  372. }
  373. if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
  374. dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
  375. dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
  376. dl_se->runtime = pi_se->dl_runtime;
  377. }
  378. }
  379. /*
  380. * If the entity depleted all its runtime, and if we want it to sleep
  381. * while waiting for some new execution time to become available, we
  382. * set the bandwidth enforcement timer to the replenishment instant
  383. * and try to activate it.
  384. *
  385. * Notice that it is important for the caller to know if the timer
  386. * actually started or not (i.e., the replenishment instant is in
  387. * the future or in the past).
  388. */
  389. static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
  390. {
  391. struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
  392. struct rq *rq = rq_of_dl_rq(dl_rq);
  393. ktime_t now, act;
  394. ktime_t soft, hard;
  395. unsigned long range;
  396. s64 delta;
  397. if (boosted)
  398. return 0;
  399. /*
  400. * We want the timer to fire at the deadline, but considering
  401. * that it is actually coming from rq->clock and not from
  402. * hrtimer's time base reading.
  403. */
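/*
 * E.g., if the deadline is 500ms in rq_clock terms while rq_clock reads
 * 480ms and the hrtimer base reads 480.2ms, then delta = 0.2ms and the
 * timer is armed at 500.2ms of hrtimer time, i.e. right when rq_clock is
 * expected to reach the 500ms deadline.
 */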
  404. act = ns_to_ktime(dl_se->deadline);
  405. now = hrtimer_cb_get_time(&dl_se->dl_timer);
  406. delta = ktime_to_ns(now) - rq_clock(rq);
  407. act = ktime_add_ns(act, delta);
  408. /*
  409. * If the expiry time already passed, e.g., because the value
  410. * chosen as the deadline is too small, don't even try to
  411. * start the timer in the past!
  412. */
  413. if (ktime_us_delta(act, now) < 0)
  414. return 0;
  415. hrtimer_set_expires(&dl_se->dl_timer, act);
  416. soft = hrtimer_get_softexpires(&dl_se->dl_timer);
  417. hard = hrtimer_get_expires(&dl_se->dl_timer);
  418. range = ktime_to_ns(ktime_sub(hard, soft));
  419. __hrtimer_start_range_ns(&dl_se->dl_timer, soft,
  420. range, HRTIMER_MODE_ABS, 0);
  421. return hrtimer_active(&dl_se->dl_timer);
  422. }
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (i.e., it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clear dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
  436. static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
  437. {
  438. struct sched_dl_entity *dl_se = container_of(timer,
  439. struct sched_dl_entity,
  440. dl_timer);
  441. struct task_struct *p = dl_task_of(dl_se);
  442. unsigned long flags;
  443. struct rq *rq;
  444. rq = task_rq_lock(current, &flags);
/*
 * We need to take care of several possible races here:
 *
 *  - the task might have changed its scheduling policy
 *    to something different than SCHED_DEADLINE
 *  - the task might have changed its reservation parameters
 *    (through sched_setattr())
 *  - the task might have been boosted by someone else and
 *    might be in the boosting/deboosting path
 *
 * In all these cases we bail out, as the task is already
 * in the runqueue or is going to be enqueued back anyway.
 */
  458. if (!dl_task(p) || dl_se->dl_new ||
  459. dl_se->dl_boosted || !dl_se->dl_throttled)
  460. goto unlock;
  461. sched_clock_tick();
  462. update_rq_clock(rq);
  463. /*
  464. * If the throttle happened during sched-out; like:
  465. *
  466. * schedule()
  467. * deactivate_task()
  468. * dequeue_task_dl()
  469. * update_curr_dl()
  470. * start_dl_timer()
  471. * __dequeue_task_dl()
  472. * prev->on_rq = 0;
  473. *
  474. * We can be both throttled and !queued. Replenish the counter
  475. * but do not enqueue -- wait for our wakeup to do that.
  476. */
  477. if (!task_on_rq_queued(p)) {
  478. replenish_dl_entity(dl_se, dl_se);
  479. goto unlock;
  480. }
  481. enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
  482. if (dl_task(rq->curr))
  483. check_preempt_curr_dl(rq, p, 0);
  484. else
  485. resched_curr(rq);
  486. #ifdef CONFIG_SMP
  487. /*
  488. * Queueing this task back might have overloaded rq,
  489. * check if we need to kick someone away.
  490. */
  491. if (has_pushable_dl_tasks(rq))
  492. push_dl_task(rq);
  493. #endif
  494. unlock:
  495. task_rq_unlock(rq, current, &flags);
  496. return HRTIMER_NORESTART;
  497. }
  498. void init_dl_task_timer(struct sched_dl_entity *dl_se)
  499. {
  500. struct hrtimer *timer = &dl_se->dl_timer;
  501. hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  502. timer->function = dl_task_timer;
  503. }
  504. static
  505. int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
  506. {
  507. return (dl_se->runtime <= 0);
  508. }
  509. extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
  510. /*
  511. * Update the current task's runtime statistics (provided it is still
  512. * a -deadline task and has not been removed from the dl_rq).
  513. */
  514. static void update_curr_dl(struct rq *rq)
  515. {
  516. struct task_struct *curr = rq->curr;
  517. struct sched_dl_entity *dl_se = &curr->dl;
  518. u64 delta_exec;
  519. if (!dl_task(curr) || !on_dl_rq(dl_se))
  520. return;
  521. /*
  522. * Consumed budget is computed considering the time as
  523. * observed by schedulable tasks (excluding time spent
  524. * in hardirq context, etc.). Deadlines are instead
  525. * computed using hard walltime. This seems to be the more
  526. * natural solution, but the full ramifications of this
  527. * approach need further study.
  528. */
  529. delta_exec = rq_clock_task(rq) - curr->se.exec_start;
  530. if (unlikely((s64)delta_exec <= 0))
  531. return;
  532. schedstat_set(curr->se.statistics.exec_max,
  533. max(curr->se.statistics.exec_max, delta_exec));
  534. curr->se.sum_exec_runtime += delta_exec;
  535. account_group_exec_runtime(curr, delta_exec);
  536. curr->se.exec_start = rq_clock_task(rq);
  537. cpuacct_charge(curr, delta_exec);
  538. sched_rt_avg_update(rq, delta_exec);
  539. dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
  540. if (dl_runtime_exceeded(rq, dl_se)) {
  541. dl_se->dl_throttled = 1;
  542. __dequeue_task_dl(rq, curr, 0);
  543. if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
  544. enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
  545. if (!is_leftmost(curr, &rq->dl))
  546. resched_curr(rq);
  547. }
  548. /*
  549. * Because -- for now -- we share the rt bandwidth, we need to
  550. * account our runtime there too, otherwise actual rt tasks
  551. * would be able to exceed the shared quota.
  552. *
  553. * Account to the root rt group for now.
  554. *
  555. * The solution we're working towards is having the RT groups scheduled
  556. * using deadline servers -- however there's a few nasties to figure
  557. * out before that can happen.
  558. */
  559. if (rt_bandwidth_enabled()) {
  560. struct rt_rq *rt_rq = &rq->rt;
  561. raw_spin_lock(&rt_rq->rt_runtime_lock);
  562. /*
  563. * We'll let actual RT tasks worry about the overflow here, we
  564. * have our own CBS to keep us inline; only account when RT
  565. * bandwidth is relevant.
  566. */
  567. if (sched_rt_bandwidth_account(rt_rq))
  568. rt_rq->rt_time += delta_exec;
  569. raw_spin_unlock(&rt_rq->rt_runtime_lock);
  570. }
  571. }
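/*
 * Putting the above together for a task with a 10ms/100ms reservation
 * that runs flat out: delta_exec is charged against dl_se->runtime until
 * it drops to zero, at which point the task is marked dl_throttled,
 * dequeued, and dl_timer is armed at its current absolute deadline. When
 * the timer fires, dl_task_timer() replenishes the budget (pushing the
 * deadline one period ahead) and puts the task back on the dl_rq, so it
 * gets roughly 10ms out of every 100ms no matter how much it asks for.
 */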
  572. #ifdef CONFIG_SMP
  573. static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
  574. static inline u64 next_deadline(struct rq *rq)
  575. {
  576. struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
  577. if (next && dl_prio(next->prio))
  578. return next->dl.deadline;
  579. else
  580. return 0;
  581. }
  582. static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
  583. {
  584. struct rq *rq = rq_of_dl_rq(dl_rq);
  585. if (dl_rq->earliest_dl.curr == 0 ||
  586. dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
/*
 * If the dl_rq had no -deadline tasks, or if the new task
 * has a shorter deadline than the current one on dl_rq, we
 * know that the previous earliest becomes our next earliest,
 * as the new task becomes the earliest itself.
 */
  593. dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
  594. dl_rq->earliest_dl.curr = deadline;
  595. cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
  596. } else if (dl_rq->earliest_dl.next == 0 ||
  597. dl_time_before(deadline, dl_rq->earliest_dl.next)) {
/*
 * On the other hand, if the new -deadline task has a
 * later deadline than the earliest one on dl_rq, but
 * it is earlier than the next (if any), we must
 * recompute the next-earliest.
 */
  604. dl_rq->earliest_dl.next = next_deadline(rq);
  605. }
  606. }
  607. static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
  608. {
  609. struct rq *rq = rq_of_dl_rq(dl_rq);
  610. /*
  611. * Since we may have removed our earliest (and/or next earliest)
  612. * task we must recompute them.
  613. */
  614. if (!dl_rq->dl_nr_running) {
  615. dl_rq->earliest_dl.curr = 0;
  616. dl_rq->earliest_dl.next = 0;
  617. cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
  618. } else {
  619. struct rb_node *leftmost = dl_rq->rb_leftmost;
  620. struct sched_dl_entity *entry;
  621. entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
  622. dl_rq->earliest_dl.curr = entry->deadline;
  623. dl_rq->earliest_dl.next = next_deadline(rq);
  624. cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
  625. }
  626. }
  627. #else
  628. static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
  629. static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
  630. #endif /* CONFIG_SMP */
  631. static inline
  632. void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
  633. {
  634. int prio = dl_task_of(dl_se)->prio;
  635. u64 deadline = dl_se->deadline;
  636. WARN_ON(!dl_prio(prio));
  637. dl_rq->dl_nr_running++;
  638. add_nr_running(rq_of_dl_rq(dl_rq), 1);
  639. inc_dl_deadline(dl_rq, deadline);
  640. inc_dl_migration(dl_se, dl_rq);
  641. }
  642. static inline
  643. void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
  644. {
  645. int prio = dl_task_of(dl_se)->prio;
  646. WARN_ON(!dl_prio(prio));
  647. WARN_ON(!dl_rq->dl_nr_running);
  648. dl_rq->dl_nr_running--;
  649. sub_nr_running(rq_of_dl_rq(dl_rq), 1);
  650. dec_dl_deadline(dl_rq, dl_se->deadline);
  651. dec_dl_migration(dl_se, dl_rq);
  652. }
  653. static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
  654. {
  655. struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
  656. struct rb_node **link = &dl_rq->rb_root.rb_node;
  657. struct rb_node *parent = NULL;
  658. struct sched_dl_entity *entry;
  659. int leftmost = 1;
  660. BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
  661. while (*link) {
  662. parent = *link;
  663. entry = rb_entry(parent, struct sched_dl_entity, rb_node);
  664. if (dl_time_before(dl_se->deadline, entry->deadline))
  665. link = &parent->rb_left;
  666. else {
  667. link = &parent->rb_right;
  668. leftmost = 0;
  669. }
  670. }
  671. if (leftmost)
  672. dl_rq->rb_leftmost = &dl_se->rb_node;
  673. rb_link_node(&dl_se->rb_node, parent, link);
  674. rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
  675. inc_dl_tasks(dl_se, dl_rq);
  676. }
  677. static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
  678. {
  679. struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
  680. if (RB_EMPTY_NODE(&dl_se->rb_node))
  681. return;
  682. if (dl_rq->rb_leftmost == &dl_se->rb_node) {
  683. struct rb_node *next_node;
  684. next_node = rb_next(&dl_se->rb_node);
  685. dl_rq->rb_leftmost = next_node;
  686. }
  687. rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
  688. RB_CLEAR_NODE(&dl_se->rb_node);
  689. dec_dl_tasks(dl_se, dl_rq);
  690. }
  691. static void
  692. enqueue_dl_entity(struct sched_dl_entity *dl_se,
  693. struct sched_dl_entity *pi_se, int flags)
  694. {
  695. BUG_ON(on_dl_rq(dl_se));
  696. /*
  697. * If this is a wakeup or a new instance, the scheduling
  698. * parameters of the task might need updating. Otherwise,
  699. * we want a replenishment of its runtime.
  700. */
  701. if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
  702. update_dl_entity(dl_se, pi_se);
  703. else if (flags & ENQUEUE_REPLENISH)
  704. replenish_dl_entity(dl_se, pi_se);
  705. __enqueue_dl_entity(dl_se);
  706. }
  707. static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
  708. {
  709. __dequeue_dl_entity(dl_se);
  710. }
  711. static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
  712. {
  713. struct task_struct *pi_task = rt_mutex_get_top_task(p);
  714. struct sched_dl_entity *pi_se = &p->dl;
/*
 * Use the scheduling parameters of the top pi-waiter
 * task if we have one and its (relative) deadline is
 * smaller than ours... Otherwise we keep our runtime and
 * deadline.
 */
  721. if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
  722. pi_se = &pi_task->dl;
  723. } else if (!dl_prio(p->normal_prio)) {
/*
 * Special case in which we have a !SCHED_DEADLINE task
 * that is going to be deboosted, but exceeds its
 * runtime while doing so. No point in replenishing
 * it, as it's going to return to its original
 * scheduling class after this.
 */
  731. BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
  732. return;
  733. }
  734. /*
  735. * If p is throttled, we do nothing. In fact, if it exhausted
  736. * its budget it needs a replenishment and, since it now is on
  737. * its rq, the bandwidth timer callback (which clearly has not
  738. * run yet) will take care of this.
  739. */
  740. if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
  741. return;
  742. enqueue_dl_entity(&p->dl, pi_se, flags);
  743. if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
  744. enqueue_pushable_dl_task(rq, p);
  745. }
  746. static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
  747. {
  748. dequeue_dl_entity(&p->dl);
  749. dequeue_pushable_dl_task(rq, p);
  750. }
  751. static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
  752. {
  753. update_curr_dl(rq);
  754. __dequeue_task_dl(rq, p, flags);
  755. }
/*
 * Yield task semantics for -deadline tasks:
 *
 * get off the CPU until our next instance, with
 * a new runtime. This is of little use now, since we
 * don't have a bandwidth reclaiming mechanism. Anyway,
 * bandwidth reclaiming is planned for the future, and
 * yield_task_dl will indicate that some spare budget
 * is available for other task instances to use.
 */
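/*
 * In practice, a -deadline task that calls sched_yield() after consuming,
 * say, 3ms of a 10ms budget forfeits the remaining 7ms: its runtime is
 * forced to zero below, update_curr_dl() throttles it, and the bandwidth
 * timer wakes it at its current deadline with a full budget for the next
 * instance.
 */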
  766. static void yield_task_dl(struct rq *rq)
  767. {
  768. struct task_struct *p = rq->curr;
  769. /*
  770. * We make the task go to sleep until its current deadline by
  771. * forcing its runtime to zero. This way, update_curr_dl() stops
  772. * it and the bandwidth timer will wake it up and will give it
  773. * new scheduling parameters (thanks to dl_yielded=1).
  774. */
  775. if (p->dl.runtime > 0) {
  776. rq->curr->dl.dl_yielded = 1;
  777. p->dl.runtime = 0;
  778. }
  779. update_rq_clock(rq);
  780. update_curr_dl(rq);
  781. /*
  782. * Tell update_rq_clock() that we've just updated,
  783. * so we don't do microscopic update in schedule()
  784. * and double the fastpath cost.
  785. */
  786. rq_clock_skip_update(rq, true);
  787. }
  788. #ifdef CONFIG_SMP
  789. static int find_later_rq(struct task_struct *task);
  790. static int
  791. select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
  792. {
  793. struct task_struct *curr;
  794. struct rq *rq;
  795. if (sd_flag != SD_BALANCE_WAKE)
  796. goto out;
  797. rq = cpu_rq(cpu);
  798. rcu_read_lock();
  799. curr = ACCESS_ONCE(rq->curr); /* unlocked access */
  800. /*
  801. * If we are dealing with a -deadline task, we must
  802. * decide where to wake it up.
  803. * If it has a later deadline and the current task
  804. * on this rq can't move (provided the waking task
  805. * can!) we prefer to send it somewhere else. On the
  806. * other hand, if it has a shorter deadline, we
  807. * try to make it stay here, it might be important.
  808. */
  809. if (unlikely(dl_task(curr)) &&
  810. (curr->nr_cpus_allowed < 2 ||
  811. !dl_entity_preempt(&p->dl, &curr->dl)) &&
  812. (p->nr_cpus_allowed > 1)) {
  813. int target = find_later_rq(p);
  814. if (target != -1)
  815. cpu = target;
  816. }
  817. rcu_read_unlock();
  818. out:
  819. return cpu;
  820. }
  821. static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
  822. {
  823. /*
  824. * Current can't be migrated, useless to reschedule,
  825. * let's hope p can move out.
  826. */
  827. if (rq->curr->nr_cpus_allowed == 1 ||
  828. cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
  829. return;
  830. /*
  831. * p is migratable, so let's not schedule it and
  832. * see if it is pushed or pulled somewhere else.
  833. */
  834. if (p->nr_cpus_allowed != 1 &&
  835. cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
  836. return;
  837. resched_curr(rq);
  838. }
  839. static int pull_dl_task(struct rq *this_rq);
  840. #endif /* CONFIG_SMP */
  841. /*
  842. * Only called when both the current and waking task are -deadline
  843. * tasks.
  844. */
  845. static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
  846. int flags)
  847. {
  848. if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
  849. resched_curr(rq);
  850. return;
  851. }
  852. #ifdef CONFIG_SMP
  853. /*
  854. * In the unlikely case current and p have the same deadline
  855. * let us try to decide what's the best thing to do...
  856. */
  857. if ((p->dl.deadline == rq->curr->dl.deadline) &&
  858. !test_tsk_need_resched(rq->curr))
  859. check_preempt_equal_dl(rq, p);
  860. #endif /* CONFIG_SMP */
  861. }
  862. #ifdef CONFIG_SCHED_HRTICK
  863. static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
  864. {
  865. hrtick_start(rq, p->dl.runtime);
  866. }
  867. #else /* !CONFIG_SCHED_HRTICK */
  868. static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
  869. {
  870. }
  871. #endif
  872. static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
  873. struct dl_rq *dl_rq)
  874. {
  875. struct rb_node *left = dl_rq->rb_leftmost;
  876. if (!left)
  877. return NULL;
  878. return rb_entry(left, struct sched_dl_entity, rb_node);
  879. }
  880. struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
  881. {
  882. struct sched_dl_entity *dl_se;
  883. struct task_struct *p;
  884. struct dl_rq *dl_rq;
  885. dl_rq = &rq->dl;
  886. if (need_pull_dl_task(rq, prev)) {
  887. pull_dl_task(rq);
/*
 * pull_dl_task() can drop (and re-acquire) rq->lock; this
 * means a stop task can slip in, in which case we need to
 * re-start task selection.
 */
  893. if (rq->stop && task_on_rq_queued(rq->stop))
  894. return RETRY_TASK;
  895. }
  896. /*
  897. * When prev is DL, we may throttle it in put_prev_task().
  898. * So, we update time before we check for dl_nr_running.
  899. */
  900. if (prev->sched_class == &dl_sched_class)
  901. update_curr_dl(rq);
  902. if (unlikely(!dl_rq->dl_nr_running))
  903. return NULL;
  904. put_prev_task(rq, prev);
  905. dl_se = pick_next_dl_entity(rq, dl_rq);
  906. BUG_ON(!dl_se);
  907. p = dl_task_of(dl_se);
  908. p->se.exec_start = rq_clock_task(rq);
  909. /* Running task will never be pushed. */
  910. dequeue_pushable_dl_task(rq, p);
  911. if (hrtick_enabled(rq))
  912. start_hrtick_dl(rq, p);
  913. set_post_schedule(rq);
  914. return p;
  915. }
  916. static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
  917. {
  918. update_curr_dl(rq);
  919. if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
  920. enqueue_pushable_dl_task(rq, p);
  921. }
  922. static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
  923. {
  924. update_curr_dl(rq);
  925. /*
  926. * Even when we have runtime, update_curr_dl() might have resulted in us
  927. * not being the leftmost task anymore. In that case NEED_RESCHED will
  928. * be set and schedule() will start a new hrtick for the next task.
  929. */
  930. if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
  931. is_leftmost(p, &rq->dl))
  932. start_hrtick_dl(rq, p);
  933. }
  934. static void task_fork_dl(struct task_struct *p)
  935. {
  936. /*
  937. * SCHED_DEADLINE tasks cannot fork and this is achieved through
  938. * sched_fork()
  939. */
  940. }
  941. static void task_dead_dl(struct task_struct *p)
  942. {
  943. struct hrtimer *timer = &p->dl.dl_timer;
  944. struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
  945. /*
  946. * Since we are TASK_DEAD we won't slip out of the domain!
  947. */
  948. raw_spin_lock_irq(&dl_b->lock);
  949. /* XXX we should retain the bw until 0-lag */
  950. dl_b->total_bw -= p->dl.dl_bw;
  951. raw_spin_unlock_irq(&dl_b->lock);
  952. hrtimer_cancel(timer);
  953. }
  954. static void set_curr_task_dl(struct rq *rq)
  955. {
  956. struct task_struct *p = rq->curr;
  957. p->se.exec_start = rq_clock_task(rq);
  958. /* You can't push away the running task */
  959. dequeue_pushable_dl_task(rq, p);
  960. }
  961. #ifdef CONFIG_SMP
  962. /* Only try algorithms three times */
  963. #define DL_MAX_TRIES 3
  964. static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
  965. {
  966. if (!task_running(rq, p) &&
  967. cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
  968. return 1;
  969. return 0;
  970. }
  971. /* Returns the second earliest -deadline task, NULL otherwise */
  972. static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
  973. {
  974. struct rb_node *next_node = rq->dl.rb_leftmost;
  975. struct sched_dl_entity *dl_se;
  976. struct task_struct *p = NULL;
  977. next_node:
  978. next_node = rb_next(next_node);
  979. if (next_node) {
  980. dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
  981. p = dl_task_of(dl_se);
  982. if (pick_dl_task(rq, p, cpu))
  983. return p;
  984. goto next_node;
  985. }
  986. return NULL;
  987. }
  988. static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
  989. static int find_later_rq(struct task_struct *task)
  990. {
  991. struct sched_domain *sd;
  992. struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
  993. int this_cpu = smp_processor_id();
  994. int best_cpu, cpu = task_cpu(task);
  995. /* Make sure the mask is initialized first */
  996. if (unlikely(!later_mask))
  997. return -1;
  998. if (task->nr_cpus_allowed == 1)
  999. return -1;
  1000. /*
  1001. * We have to consider system topology and task affinity
  1002. * first, then we can look for a suitable cpu.
  1003. */
  1004. best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
  1005. task, later_mask);
  1006. if (best_cpu == -1)
  1007. return -1;
/*
 * If we are here, some target has been found,
 * the most suitable of which is cached in best_cpu.
 * This is, among the runqueues where the current tasks
 * have later deadlines than the task's, the rq
 * with the latest possible one.
 *
 * Now we check how well this matches with the task's
 * affinity and system topology.
 *
 * The last CPU where the task ran is our first
 * guess, since it is most likely cache-hot there.
 */
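/*
 * To make the fallback order concrete: if the task last ran on CPU 2 and
 * cpudl_find() put {2, 5} in later_mask, CPU 2 is returned right away.
 * Had only CPU 5 qualified, we would walk the SD_WAKE_AFFINE domains of
 * CPU 2, preferring this_cpu (preempting locally is cheaper than
 * migrating) and then best_cpu whenever they fall inside the domain at
 * hand; failing all of that we settle for this_cpu if it is in the mask,
 * then for any CPU of the mask, and finally give up with -1.
 */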
  1021. if (cpumask_test_cpu(cpu, later_mask))
  1022. return cpu;
  1023. /*
  1024. * Check if this_cpu is to be skipped (i.e., it is
  1025. * not in the mask) or not.
  1026. */
  1027. if (!cpumask_test_cpu(this_cpu, later_mask))
  1028. this_cpu = -1;
  1029. rcu_read_lock();
  1030. for_each_domain(cpu, sd) {
  1031. if (sd->flags & SD_WAKE_AFFINE) {
  1032. /*
  1033. * If possible, preempting this_cpu is
  1034. * cheaper than migrating.
  1035. */
  1036. if (this_cpu != -1 &&
  1037. cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
  1038. rcu_read_unlock();
  1039. return this_cpu;
  1040. }
  1041. /*
  1042. * Last chance: if best_cpu is valid and is
  1043. * in the mask, that becomes our choice.
  1044. */
  1045. if (best_cpu < nr_cpu_ids &&
  1046. cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
  1047. rcu_read_unlock();
  1048. return best_cpu;
  1049. }
  1050. }
  1051. }
  1052. rcu_read_unlock();
  1053. /*
  1054. * At this point, all our guesses failed, we just return
  1055. * 'something', and let the caller sort the things out.
  1056. */
  1057. if (this_cpu != -1)
  1058. return this_cpu;
  1059. cpu = cpumask_any(later_mask);
  1060. if (cpu < nr_cpu_ids)
  1061. return cpu;
  1062. return -1;
  1063. }
  1064. /* Locks the rq it finds */
  1065. static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
  1066. {
  1067. struct rq *later_rq = NULL;
  1068. int tries;
  1069. int cpu;
  1070. for (tries = 0; tries < DL_MAX_TRIES; tries++) {
  1071. cpu = find_later_rq(task);
  1072. if ((cpu == -1) || (cpu == rq->cpu))
  1073. break;
  1074. later_rq = cpu_rq(cpu);
  1075. /* Retry if something changed. */
  1076. if (double_lock_balance(rq, later_rq)) {
  1077. if (unlikely(task_rq(task) != rq ||
  1078. !cpumask_test_cpu(later_rq->cpu,
  1079. &task->cpus_allowed) ||
  1080. task_running(rq, task) ||
  1081. !task_on_rq_queued(task))) {
  1082. double_unlock_balance(rq, later_rq);
  1083. later_rq = NULL;
  1084. break;
  1085. }
  1086. }
  1087. /*
  1088. * If the rq we found has no -deadline task, or
  1089. * its earliest one has a later deadline than our
  1090. * task, the rq is a good one.
  1091. */
  1092. if (!later_rq->dl.dl_nr_running ||
  1093. dl_time_before(task->dl.deadline,
  1094. later_rq->dl.earliest_dl.curr))
  1095. break;
  1096. /* Otherwise we try again. */
  1097. double_unlock_balance(rq, later_rq);
  1098. later_rq = NULL;
  1099. }
  1100. return later_rq;
  1101. }
  1102. static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
  1103. {
  1104. struct task_struct *p;
  1105. if (!has_pushable_dl_tasks(rq))
  1106. return NULL;
  1107. p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
  1108. struct task_struct, pushable_dl_tasks);
  1109. BUG_ON(rq->cpu != task_cpu(p));
  1110. BUG_ON(task_current(rq, p));
  1111. BUG_ON(p->nr_cpus_allowed <= 1);
  1112. BUG_ON(!task_on_rq_queued(p));
  1113. BUG_ON(!dl_task(p));
  1114. return p;
  1115. }
/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
  1121. static int push_dl_task(struct rq *rq)
  1122. {
  1123. struct task_struct *next_task;
  1124. struct rq *later_rq;
  1125. int ret = 0;
  1126. if (!rq->dl.overloaded)
  1127. return 0;
  1128. next_task = pick_next_pushable_dl_task(rq);
  1129. if (!next_task)
  1130. return 0;
  1131. retry:
  1132. if (unlikely(next_task == rq->curr)) {
  1133. WARN_ON(1);
  1134. return 0;
  1135. }
  1136. /*
  1137. * If next_task preempts rq->curr, and rq->curr
  1138. * can move away, it makes sense to just reschedule
  1139. * without going further in pushing next_task.
  1140. */
  1141. if (dl_task(rq->curr) &&
  1142. dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
  1143. rq->curr->nr_cpus_allowed > 1) {
  1144. resched_curr(rq);
  1145. return 0;
  1146. }
  1147. /* We might release rq lock */
  1148. get_task_struct(next_task);
  1149. /* Will lock the rq it'll find */
  1150. later_rq = find_lock_later_rq(next_task, rq);
  1151. if (!later_rq) {
  1152. struct task_struct *task;
  1153. /*
  1154. * We must check all this again, since
  1155. * find_lock_later_rq releases rq->lock and it is
  1156. * then possible that next_task has migrated.
  1157. */
  1158. task = pick_next_pushable_dl_task(rq);
  1159. if (task_cpu(next_task) == rq->cpu && task == next_task) {
  1160. /*
  1161. * The task is still there. We don't try
  1162. * again, some other cpu will pull it when ready.
  1163. */
  1164. goto out;
  1165. }
  1166. if (!task)
  1167. /* No more tasks */
  1168. goto out;
  1169. put_task_struct(next_task);
  1170. next_task = task;
  1171. goto retry;
  1172. }
  1173. deactivate_task(rq, next_task, 0);
  1174. set_task_cpu(next_task, later_rq->cpu);
  1175. activate_task(later_rq, next_task, 0);
  1176. ret = 1;
  1177. resched_curr(later_rq);
  1178. double_unlock_balance(rq, later_rq);
  1179. out:
  1180. put_task_struct(next_task);
  1181. return ret;
  1182. }
  1183. static void push_dl_tasks(struct rq *rq)
  1184. {
  1185. /* Terminates as it moves a -deadline task */
  1186. while (push_dl_task(rq))
  1187. ;
  1188. }
  1189. static int pull_dl_task(struct rq *this_rq)
  1190. {
  1191. int this_cpu = this_rq->cpu, ret = 0, cpu;
  1192. struct task_struct *p;
  1193. struct rq *src_rq;
  1194. u64 dmin = LONG_MAX;
  1195. if (likely(!dl_overloaded(this_rq)))
  1196. return 0;
/*
 * Match the barrier from dl_set_overload(); this guarantees that if we
 * see overloaded we must also see the dlo_mask bit.
 */
  1201. smp_rmb();
  1202. for_each_cpu(cpu, this_rq->rd->dlo_mask) {
  1203. if (this_cpu == cpu)
  1204. continue;
  1205. src_rq = cpu_rq(cpu);
/*
 * It looks racy, and it is! However, as in sched_rt.c,
 * we are fine with this.
 */
  1210. if (this_rq->dl.dl_nr_running &&
  1211. dl_time_before(this_rq->dl.earliest_dl.curr,
  1212. src_rq->dl.earliest_dl.next))
  1213. continue;
  1214. /* Might drop this_rq->lock */
  1215. double_lock_balance(this_rq, src_rq);
  1216. /*
  1217. * If there are no more pullable tasks on the
  1218. * rq, we're done with it.
  1219. */
  1220. if (src_rq->dl.dl_nr_running <= 1)
  1221. goto skip;
  1222. p = pick_next_earliest_dl_task(src_rq, this_cpu);
  1223. /*
  1224. * We found a task to be pulled if:
  1225. * - it preempts our current (if there's one),
  1226. * - it will preempt the last one we pulled (if any).
  1227. */
  1228. if (p && dl_time_before(p->dl.deadline, dmin) &&
  1229. (!this_rq->dl.dl_nr_running ||
  1230. dl_time_before(p->dl.deadline,
  1231. this_rq->dl.earliest_dl.curr))) {
  1232. WARN_ON(p == src_rq->curr);
  1233. WARN_ON(!task_on_rq_queued(p));
  1234. /*
  1235. * Then we pull iff p has actually an earlier
  1236. * deadline than the current task of its runqueue.
  1237. */
  1238. if (dl_time_before(p->dl.deadline,
  1239. src_rq->curr->dl.deadline))
  1240. goto skip;
  1241. ret = 1;
  1242. deactivate_task(src_rq, p, 0);
  1243. set_task_cpu(p, this_cpu);
  1244. activate_task(this_rq, p, 0);
  1245. dmin = p->dl.deadline;
  1246. /* Is there any other task even earlier? */
  1247. }
  1248. skip:
  1249. double_unlock_balance(this_rq, src_rq);
  1250. }
  1251. return ret;
  1252. }
  1253. static void post_schedule_dl(struct rq *rq)
  1254. {
  1255. push_dl_tasks(rq);
  1256. }
  1257. /*
  1258. * Since the task is not running and a reschedule is not going to happen
  1259. * anytime soon on its runqueue, we try pushing it away now.
  1260. */
  1261. static void task_woken_dl(struct rq *rq, struct task_struct *p)
  1262. {
  1263. if (!task_running(rq, p) &&
  1264. !test_tsk_need_resched(rq->curr) &&
  1265. has_pushable_dl_tasks(rq) &&
  1266. p->nr_cpus_allowed > 1 &&
  1267. dl_task(rq->curr) &&
  1268. (rq->curr->nr_cpus_allowed < 2 ||
  1269. !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
  1270. push_dl_tasks(rq);
  1271. }
  1272. }
  1273. static void set_cpus_allowed_dl(struct task_struct *p,
  1274. const struct cpumask *new_mask)
  1275. {
  1276. struct rq *rq;
  1277. struct root_domain *src_rd;
  1278. int weight;
  1279. BUG_ON(!dl_task(p));
  1280. rq = task_rq(p);
  1281. src_rd = rq->rd;
  1282. /*
  1283. * Migrating a SCHED_DEADLINE task between exclusive
  1284. * cpusets (different root_domains) entails a bandwidth
  1285. * update. We already made space for us in the destination
  1286. * domain (see cpuset_can_attach()).
  1287. */
  1288. if (!cpumask_intersects(src_rd->span, new_mask)) {
  1289. struct dl_bw *src_dl_b;
  1290. src_dl_b = dl_bw_of(cpu_of(rq));
/*
 * We now free the resources of the root_domain we are migrating
 * off. In the worst case, sched_setattr() may temporarily fail
 * until we complete the update.
 */
  1296. raw_spin_lock(&src_dl_b->lock);
  1297. __dl_clear(src_dl_b, p->dl.dl_bw);
  1298. raw_spin_unlock(&src_dl_b->lock);
  1299. }
  1300. /*
  1301. * Update only if the task is actually running (i.e.,
  1302. * it is on the rq AND it is not throttled).
  1303. */
  1304. if (!on_dl_rq(&p->dl))
  1305. return;
  1306. weight = cpumask_weight(new_mask);
/*
 * Only update if the process changed whether or not it
 * can migrate.
 */
  1311. if ((p->nr_cpus_allowed > 1) == (weight > 1))
  1312. return;
  1313. /*
  1314. * The process used to be able to migrate OR it can now migrate
  1315. */
  1316. if (weight <= 1) {
  1317. if (!task_current(rq, p))
  1318. dequeue_pushable_dl_task(rq, p);
  1319. BUG_ON(!rq->dl.dl_nr_migratory);
  1320. rq->dl.dl_nr_migratory--;
  1321. } else {
  1322. if (!task_current(rq, p))
  1323. enqueue_pushable_dl_task(rq, p);
  1324. rq->dl.dl_nr_migratory++;
  1325. }
  1326. update_dl_migration(&rq->dl);
  1327. }
  1328. /* Assumes rq->lock is held */
  1329. static void rq_online_dl(struct rq *rq)
  1330. {
  1331. if (rq->dl.overloaded)
  1332. dl_set_overload(rq);
  1333. cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
  1334. if (rq->dl.dl_nr_running > 0)
  1335. cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
  1336. }
  1337. /* Assumes rq->lock is held */
  1338. static void rq_offline_dl(struct rq *rq)
  1339. {
  1340. if (rq->dl.overloaded)
  1341. dl_clear_overload(rq);
  1342. cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
  1343. cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
  1344. }
  1345. void init_sched_dl_class(void)
  1346. {
  1347. unsigned int i;
  1348. for_each_possible_cpu(i)
  1349. zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
  1350. GFP_KERNEL, cpu_to_node(i));
  1351. }
  1352. #endif /* CONFIG_SMP */
  1353. /*
  1354. * Ensure p's dl_timer is cancelled. May drop rq->lock for a while.
  1355. */
  1356. static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
  1357. {
  1358. struct hrtimer *dl_timer = &p->dl.dl_timer;
  1359. /* Nobody will change task's class if pi_lock is held */
  1360. lockdep_assert_held(&p->pi_lock);
  1361. if (hrtimer_active(dl_timer)) {
  1362. int ret = hrtimer_try_to_cancel(dl_timer);
  1363. if (unlikely(ret == -1)) {
  1364. /*
  1365. * Note, p may migrate OR new deadline tasks
  1366. * may appear in rq when we are unlocking it.
  1367. * A caller of us must be fine with that.
  1368. */
  1369. raw_spin_unlock(&rq->lock);
  1370. hrtimer_cancel(dl_timer);
  1371. raw_spin_lock(&rq->lock);
  1372. }
  1373. }
  1374. }
  1375. static void switched_from_dl(struct rq *rq, struct task_struct *p)
  1376. {
  1377. /* XXX we should retain the bw until 0-lag */
  1378. cancel_dl_timer(rq, p);
  1379. __dl_clear_params(p);
  1380. /*
  1381. * Since this might be the only -deadline task on the rq,
  1382. * this is the right place to try to pull some other one
  1383. * from an overloaded cpu, if any.
  1384. */
  1385. if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
  1386. return;
  1387. if (pull_dl_task(rq))
  1388. resched_curr(rq);
  1389. }
  1390. /*
  1391. * When switching to -deadline, we may overload the rq, then
  1392. * we try to push someone off, if possible.
  1393. */
  1394. static void switched_to_dl(struct rq *rq, struct task_struct *p)
  1395. {
  1396. int check_resched = 1;
  1397. if (task_on_rq_queued(p) && rq->curr != p) {
  1398. #ifdef CONFIG_SMP
  1399. if (p->nr_cpus_allowed > 1 && rq->dl.overloaded &&
  1400. push_dl_task(rq) && rq != task_rq(p))
  1401. /* Only reschedule if pushing failed */
  1402. check_resched = 0;
  1403. #endif /* CONFIG_SMP */
  1404. if (check_resched) {
  1405. if (dl_task(rq->curr))
  1406. check_preempt_curr_dl(rq, p, 0);
  1407. else
  1408. resched_curr(rq);
  1409. }
  1410. }
  1411. }
  1412. /*
  1413. * If the scheduling parameters of a -deadline task changed,
  1414. * a push or pull operation might be needed.
  1415. */
  1416. static void prio_changed_dl(struct rq *rq, struct task_struct *p,
  1417. int oldprio)
  1418. {
  1419. if (task_on_rq_queued(p) || rq->curr == p) {
  1420. #ifdef CONFIG_SMP
  1421. /*
  1422. * This might be too much, but unfortunately
  1423. * we don't have the old deadline value, and
  1424. * we can't argue if the task is increasing
  1425. * or lowering its prio, so...
  1426. */
  1427. if (!rq->dl.overloaded)
  1428. pull_dl_task(rq);
/*
 * If we now have an earlier deadline task than p,
 * then reschedule, provided p is still on this
 * runqueue.
 */
  1434. if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
  1435. rq->curr == p)
  1436. resched_curr(rq);
  1437. #else
/*
 * Again, we don't know if p has an earlier
 * or later deadline, so let's blindly set a
 * (maybe not needed) rescheduling point.
 */
  1443. resched_curr(rq);
  1444. #endif /* CONFIG_SMP */
  1445. } else
  1446. switched_to_dl(rq, p);
  1447. }
  1448. const struct sched_class dl_sched_class = {
  1449. .next = &rt_sched_class,
  1450. .enqueue_task = enqueue_task_dl,
  1451. .dequeue_task = dequeue_task_dl,
  1452. .yield_task = yield_task_dl,
  1453. .check_preempt_curr = check_preempt_curr_dl,
  1454. .pick_next_task = pick_next_task_dl,
  1455. .put_prev_task = put_prev_task_dl,
  1456. #ifdef CONFIG_SMP
  1457. .select_task_rq = select_task_rq_dl,
  1458. .set_cpus_allowed = set_cpus_allowed_dl,
  1459. .rq_online = rq_online_dl,
  1460. .rq_offline = rq_offline_dl,
  1461. .post_schedule = post_schedule_dl,
  1462. .task_woken = task_woken_dl,
  1463. #endif
  1464. .set_curr_task = set_curr_task_dl,
  1465. .task_tick = task_tick_dl,
  1466. .task_fork = task_fork_dl,
  1467. .task_dead = task_dead_dl,
  1468. .prio_changed = prio_changed_dl,
  1469. .switched_from = switched_from_dl,
  1470. .switched_to = switched_to_dl,
  1471. .update_curr = update_curr_dl,
  1472. };
  1473. #ifdef CONFIG_SCHED_DEBUG
  1474. extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
  1475. void print_dl_stats(struct seq_file *m, int cpu)
  1476. {
  1477. print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
  1478. }
  1479. #endif /* CONFIG_SCHED_DEBUG */