cpufreq_schedutil.c

/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/slab.h>
#include <trace/events/power.h>

#include "sched.h"

#define SUGOV_KTHREAD_PRIORITY	50

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;	/* For shared policies */
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used. */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;
	unsigned int cpu;

	bool iowait_boost_pending;
	unsigned int iowait_boost;
	unsigned int iowait_boost_max;
	u64 last_update;

	/* The fields below are only needed when sharing a policy. */
	unsigned long util;
	unsigned long max;
	unsigned int flags;

	/* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-cpu data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-cpu
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * For the slow switching platforms, the kthread is always scheduled on
	 * the right set of CPUs and any CPU can find the next frequency and
	 * schedule the kthread.
	 */
	if (sg_policy->policy->fast_switch_enabled &&
	    !cpufreq_can_do_remote_dvfs(sg_policy->policy))
		return false;

	if (sg_policy->work_in_progress)
		return false;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * This happens when limits change, so forget the previous
		 * next_freq value and force an update.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->freq_update_delay_ns;
}

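/*
 * A worked example of the rate limiting above (illustrative value): with
 * rate_limit_us = 2000, freq_update_delay_ns is 2,000,000 ns, so a
 * utilization update arriving less than 2 ms after the last frequency
 * change is dropped here, unless a limits change has set need_freq_update
 * in the meantime and forces an immediate re-evaluation.
 */
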
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (sg_policy->next_freq == next_freq)
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (!next_freq)
			return;

		policy->cur = next_freq;
		trace_cpu_frequency(next_freq, smp_processor_id());
	} else {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = (freq + (freq >> 2)) * util / max;

	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

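/*
 * A worked example of the formula above (illustrative numbers, assuming a
 * frequency-invariant platform): with max_freq = 2000000 kHz, util = 512
 * and max = 1024, freq + (freq >> 2) implements the C = 1.25 factor and
 * gives 2500000, and 2500000 * 512 / 1024 = 1250000 kHz. Half utilization
 * thus requests 62.5% of max_freq, which cpufreq_driver_resolve_freq()
 * then maps to the lowest supported frequency at or above that value.
 */
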
static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long util_cfs = cpu_util_cfs(rq);
	unsigned long util_dl = cpu_util_dl(rq);

	*max = arch_scale_cpu_capacity(NULL, cpu);

	/*
	 * Ideally we would like to set util_dl as min/guaranteed freq and
	 * util_cfs + util_dl as requested freq. However, cpufreq is not yet
	 * ready for such an interface. So, we only do the latter for now.
	 */
	*util = min(util_cfs + util_dl, *max);
}

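/*
 * For example (illustrative numbers): with util_cfs = 300, util_dl = 900
 * and a CPU capacity of 1024, the sum overshoots the capacity, so the
 * reported utilization is clamped to 1024 and the policy effectively asks
 * for the maximum frequency.
 */
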
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time)
{
	if (sg_cpu->flags & SCHED_CPUFREQ_IOWAIT) {
		if (sg_cpu->iowait_boost_pending)
			return;

		sg_cpu->iowait_boost_pending = true;

		if (sg_cpu->iowait_boost) {
			sg_cpu->iowait_boost <<= 1;
			if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
				sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
		} else {
			sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
		}
	} else if (sg_cpu->iowait_boost) {
		s64 delta_ns = time - sg_cpu->last_update;

		/* Clear iowait_boost if the CPU appears to have been idle. */
		if (delta_ns > TICK_NSEC) {
			sg_cpu->iowait_boost = 0;
			sg_cpu->iowait_boost_pending = false;
		}
	}
}

static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
			       unsigned long *max)
{
	unsigned int boost_util, boost_max;

	if (!sg_cpu->iowait_boost)
		return;

	if (sg_cpu->iowait_boost_pending) {
		sg_cpu->iowait_boost_pending = false;
	} else {
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}

	boost_util = sg_cpu->iowait_boost;
	boost_max = sg_cpu->iowait_boost_max;

	if (*util * boost_max < *max * boost_util) {
		*util = boost_util;
		*max = boost_max;
	}
}

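/*
 * Putting the two helpers together (illustrative frequencies): on the
 * first SCHED_CPUFREQ_IOWAIT wakeup the boost starts at policy->min,
 * e.g. 400000 kHz. Each further iowait wakeup doubles it (800000,
 * 1600000, ...) until it saturates at iowait_boost_max; once iowait
 * wakeups stop, each subsequent update halves it until it falls below
 * policy->min and is cleared. The boosted pair only replaces (util, max)
 * in sugov_iowait_boost() when boost_util/boost_max exceeds util/max.
 */
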
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util, max;
	unsigned int next_f;
	bool busy;

	sugov_set_iowait_boost(sg_cpu, time);
	sg_cpu->last_update = time;

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	if (flags & SCHED_CPUFREQ_RT) {
		next_f = policy->cpuinfo.max_freq;
	} else {
		sugov_get_util(&util, &max, sg_cpu->cpu);
		sugov_iowait_boost(sg_cpu, &util, &max);
		next_f = get_next_freq(sg_policy, util, max);
		/*
		 * Do not reduce the frequency if the CPU has not been idle
		 * recently, as the reduction is likely to be premature then.
		 */
		if (busy && next_f < sg_policy->next_freq) {
			next_f = sg_policy->next_freq;

			/* Reset cached freq as next_freq has changed */
			sg_policy->cached_raw_freq = 0;
		}
	}
	sugov_update_commit(sg_policy, time, next_f);
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		/*
		 * If the CPU utilization was last updated before the previous
		 * frequency update and the time elapsed between the last update
		 * of the CPU utilization and the last frequency update is long
		 * enough, don't take the CPU into account as it probably is
		 * idle now (and clear iowait_boost for it).
		 */
		delta_ns = time - j_sg_cpu->last_update;
		if (delta_ns > TICK_NSEC) {
			j_sg_cpu->iowait_boost = 0;
			j_sg_cpu->iowait_boost_pending = false;
			continue;
		}
		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT)
			return policy->cpuinfo.max_freq;

		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_iowait_boost(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
}

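/*
 * The loop above keeps the (util, max) pair with the highest util/max
 * ratio, compared without division via cross-multiplication. For example
 * (illustrative numbers), CPU0 at util 400 of max 1024 (~39%) loses to
 * CPU1 at util 300 of max 512 (~59%) because 300 * 1024 > 512 * 400, so
 * the shared frequency is sized for the proportionally busiest CPU in
 * the policy.
 */
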
static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;

	sugov_get_util(&util, &max, sg_cpu->cpu);

	raw_spin_lock(&sg_policy->update_lock);

	sg_cpu->util = util;
	sg_cpu->max = max;
	sg_cpu->flags = flags;

	sugov_set_iowait_boost(sg_cpu, time);
	sg_cpu->last_update = time;

	if (sugov_should_update_freq(sg_policy, time)) {
		if (flags & SCHED_CPUFREQ_RT)
			next_f = sg_policy->policy->cpuinfo.max_freq;
		else
			next_f = sugov_next_freq_shared(sg_cpu, time);

		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	/*
	 * For RT tasks, the schedutil governor shoots the frequency to maximum.
	 * Special care must be taken to ensure that this kthread doesn't result
	 * in the same behavior.
	 *
	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
	 * updated only at the end of the sugov_work() function and before that
	 * the schedutil governor rejects all other frequency scaling requests.
	 *
	 * There is a very rare case though, where the RT thread yields right
	 * after the work_in_progress flag is cleared. The effects of that are
	 * neglected for now.
	 */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
				   size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

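/*
 * The tunable is exposed through sysfs; typical paths (which one exists
 * depends on whether the driver uses per-policy tunables, and policy0 is
 * an example) are:
 *
 *   /sys/devices/system/cpu/cpufreq/schedutil/rate_limit_us
 *   /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *
 * A write such as "echo 2000 > .../rate_limit_us" lands in the store
 * handler above, which converts the value to nanoseconds for every
 * policy attached to this tunables set.
 */
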
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
	&rate_limit_us.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
		.sched_policy = SCHED_DEADLINE,
		.sched_flags = SCHED_FLAG_SUGOV,
		.sched_nice = 0,
		.sched_priority = 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	=  1000000,
		.sched_deadline = 10000000,
		.sched_period	= 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;

	/* Kthread is bound to all CPUs by default */
	if (!policy->dvfs_possible_from_any_cpu)
		kthread_bind_mask(thread, policy->related_cpus);

	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

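/*
 * A note on the sched_attr above: the 1 ms runtime per 10 ms period and
 * deadline nominally describes a 10% bandwidth reservation, but as the
 * comment in the initializer says it is fake and unused; SCHED_FLAG_SUGOV
 * marks the kthread as a special deadline task so that, on slow-switching
 * platforms, frequency updates get serviced ahead of ordinary tasks.
 */
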
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->flags = 0;
		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_sched();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}

static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.dynamic_switching = true,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);