drivers/cpufreq/cpufreq_governor.c

/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *           (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *           (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */

/**
 * store_sampling_rate - update the sampling rate, taking effect immediately
 * if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user wants an immediate reaction from the ondemand
 * governor, but is not sure whether a higher frequency will be required, the
 * governor may change the sampling rate too late; up to 1 second later.
 * Thus, when the sampling rate is being reduced, the new value must take
 * effect immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
                            size_t count)
{
        struct policy_dbs_info *policy_dbs;
        unsigned int rate;
        int ret;

        ret = sscanf(buf, "%u", &rate);
        if (ret != 1)
                return -EINVAL;

        dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

        /*
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
         */
        list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
                mutex_lock(&policy_dbs->timer_mutex);
                /*
                 * On 32-bit architectures this may race with the
                 * sample_delay_ns read in dbs_update_util_handler(), but that
                 * really doesn't matter.  If the read returns a value that's
                 * too big, the sample will be skipped, but the next invocation
                 * of dbs_update_util_handler() (when the update has been
                 * completed) will take a sample.
                 *
                 * If this runs in parallel with dbs_work_handler(), we may end
                 * up overwriting the sample_delay_ns value that it has just
                 * written, but it will be corrected next time a sample is
                 * taken, so it shouldn't be significant.
                 */
                gov_update_sample_delay(policy_dbs, 0);
                mutex_unlock(&policy_dbs->timer_mutex);
        }

        return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
        struct policy_dbs_info *policy_dbs;

        list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
                unsigned int j;

                for_each_cpu(j, policy_dbs->policy->cpus) {
                        struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                        j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
                                                                  dbs_data->io_is_busy);
                        if (dbs_data->ignore_nice_load)
                                j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                }
        }
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
        return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
        return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct dbs_data *dbs_data = to_dbs_data(kobj);
        struct governor_attr *gattr = to_gov_attr(attr);

        return gattr->show(dbs_data, buf);
}
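
/*
 * governor_store - sysfs write path for governor tunables.
 *
 * Forward the write to the attribute's ->store() callback only while
 * dbs_data->usage_count is nonzero, i.e. while at least one policy is still
 * using this tunable set; otherwise return -EBUSY, so that a write racing
 * with governor exit does not touch data that is on its way out.
 */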
static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
                              const char *buf, size_t count)
{
        struct dbs_data *dbs_data = to_dbs_data(kobj);
        struct governor_attr *gattr = to_gov_attr(attr);
        int ret = -EBUSY;

        mutex_lock(&dbs_data->mutex);

        if (dbs_data->usage_count)
                ret = gattr->store(dbs_data, buf, count);

        mutex_unlock(&dbs_data->mutex);

        return ret;
}

/*
 * Sysfs ops for accessing governor attributes.
 *
 * All show/store requests for governor-specific sysfs attributes go through
 * the callbacks below first, which then invoke the attribute-specific
 * callback.
 */
static const struct sysfs_ops governor_sysfs_ops = {
        .show  = governor_show,
        .store = governor_store,
};
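
/*
 * dbs_update - measure the CPU load since the previous sample.
 *
 * For every CPU in the policy, compute the busy fraction of the wall time
 * elapsed since the last sample:
 *
 *      load = 100 * (wall_time - idle_time) / wall_time
 *
 * (e.g. wall_time = 10000 us and idle_time = 2500 us give a load of 75)
 * and return the maximum of those values.  Time spent in nice is counted as
 * idle when ignore_nice_load is set, and iowait is counted as busy when
 * io_is_busy is set.
 */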
unsigned int dbs_update(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int ignore_nice = dbs_data->ignore_nice_load;
        unsigned int max_load = 0;
        unsigned int sampling_rate, io_busy, j;

        /*
         * Sometimes governors may use an additional multiplier to increase
         * sample delays temporarily.  Apply that multiplier to sampling_rate
         * so as to keep the wake-up-from-idle detection logic a bit
         * conservative.
         */
        sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
        /*
         * For the purpose of ondemand, waiting for disk IO is an indication
         * that you're performance critical, and not that the system is
         * actually idle, so do not add the iowait time to the CPU idle time
         * then.
         */
        io_busy = dbs_data->io_is_busy;

        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
                u64 cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load;

                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

                wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
                j_cdbs->prev_cpu_wall = cur_wall_time;

                if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
                        idle_time = 0;
                } else {
                        idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
                        j_cdbs->prev_cpu_idle = cur_idle_time;
                }

                if (ignore_nice) {
                        u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

                        idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
                        j_cdbs->prev_cpu_nice = cur_nice;
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                /*
                 * If the CPU had gone completely idle, and a task just woke
                 * up on this CPU now, it would be unfair to calculate 'load'
                 * the usual way for this elapsed time-window, because it will
                 * show near-zero load, irrespective of how CPU intensive that
                 * task actually is.  This is undesirable for latency-sensitive
                 * bursty workloads.
                 *
                 * To avoid this, we reuse the 'load' from the previous
                 * time-window and give this task a chance to start with a
                 * reasonably high CPU frequency.  (However, we shouldn't
                 * over-do this copy, lest we get stuck at a high load (high
                 * frequency) for too long, even when the current system load
                 * has actually dropped down.  So we perform the copy only
                 * once, upon the first wake-up from idle.)
                 *
                 * Detecting this situation is easy: the governor's utilization
                 * update handler would not have run during CPU-idle periods.
                 * Hence, an unusually large 'wall_time' (as compared to the
                 * sampling rate) indicates this scenario.
                 *
                 * prev_load can be zero in two cases and we must recalculate
                 * it for both cases:
                 * - during long idle intervals
                 * - explicitly set to zero
                 */
                if (unlikely(wall_time > (2 * sampling_rate) &&
                             j_cdbs->prev_load)) {
                        load = j_cdbs->prev_load;

                        /*
                         * Perform a destructive copy, to ensure that we copy
                         * the previous load only once, upon the first wake-up
                         * from idle.
                         */
                        j_cdbs->prev_load = 0;
                } else {
                        load = 100 * (wall_time - idle_time) / wall_time;
                        j_cdbs->prev_load = load;
                }

                if (load > max_load)
                        max_load = load;
        }

        return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
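
/*
 * gov_set_update_util - register the governor's utilization update hook.
 *
 * Reset last_sample_time, set the initial sample delay and install
 * dbs_update_util_handler() for every CPU in the policy, so that the
 * scheduler invokes it whenever it updates that CPU's utilization.
 */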
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
                                unsigned int delay_us)
{
        struct cpufreq_policy *policy = policy_dbs->policy;
        int cpu;

        gov_update_sample_delay(policy_dbs, delay_us);
        policy_dbs->last_sample_time = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

                cpufreq_set_update_util_data(cpu, &cdbs->update_util);
        }
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
        int i;

        for_each_cpu(i, policy->cpus)
                cpufreq_set_update_util_data(i, NULL);

        synchronize_sched();
}
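
/*
 * gov_cancel_work - stop all sampling activity for a policy.
 *
 * Clear the per-CPU update hooks first (waiting for in-flight callbacks via
 * synchronize_sched()), so that no new irq_work or work item can be queued,
 * then flush whatever is still pending before resetting the work_count and
 * work_in_progress state.
 */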
static void gov_cancel_work(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;

        gov_clear_update_util(policy_dbs->policy);
        irq_work_sync(&policy_dbs->irq_work);
        cancel_work_sync(&policy_dbs->work);
        atomic_set(&policy_dbs->work_count, 0);
        policy_dbs->work_in_progress = false;
}

static void dbs_work_handler(struct work_struct *work)
{
        struct policy_dbs_info *policy_dbs;
        struct cpufreq_policy *policy;
        struct dbs_governor *gov;

        policy_dbs = container_of(work, struct policy_dbs_info, work);
        policy = policy_dbs->policy;
        gov = dbs_governor_of(policy);

        /*
         * Make sure cpufreq_governor_limits() isn't evaluating load or the
         * ondemand governor isn't updating the sampling rate in parallel.
         */
        mutex_lock(&policy_dbs->timer_mutex);
        gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
        mutex_unlock(&policy_dbs->timer_mutex);

        /* Allow the utilization update handler to queue up more work. */
        atomic_set(&policy_dbs->work_count, 0);
        /*
         * If the update below is reordered with respect to the sample delay
         * modification, the utilization update handler may end up using a
         * stale sample delay value.
         */
        smp_wmb();
        policy_dbs->work_in_progress = false;
}
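
/*
 * dbs_irq_work - bounce the sample from irq_work to process context.
 *
 * The utilization update handler runs in scheduler context and cannot sleep,
 * while re-evaluating the frequency may.  Queue the policy's work item on the
 * local CPU so that dbs_work_handler() can do the actual work in process
 * context.
 */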
static void dbs_irq_work(struct irq_work *irq_work)
{
        struct policy_dbs_info *policy_dbs;

        policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
        schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
                                    unsigned long util, unsigned long max)
{
        struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
        struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
        u64 delta_ns, lst;

        /*
         * The work may not be allowed to be queued up right now.
         * Possible reasons:
         * - Work has already been queued up or is in progress.
         * - It is too early (too little time from the previous sample).
         */
        if (policy_dbs->work_in_progress)
                return;

        /*
         * If the reads below are reordered before the check above, the value
         * of sample_delay_ns used in the computation may be stale.
         */
        smp_rmb();
        lst = READ_ONCE(policy_dbs->last_sample_time);
        delta_ns = time - lst;
        if ((s64)delta_ns < policy_dbs->sample_delay_ns)
                return;

        /*
         * If the policy is not shared, the irq_work may be queued up right
         * away at this point.  Otherwise, we need to ensure that only one of
         * the CPUs sharing the policy will do that.
         */
        if (policy_dbs->is_shared) {
                if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
                        return;

                /*
                 * If another CPU updated last_sample_time in the meantime, we
                 * shouldn't be here, so clear the work counter and bail out.
                 */
                if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
                        atomic_set(&policy_dbs->work_count, 0);
                        return;
                }
        }

        policy_dbs->last_sample_time = time;
        policy_dbs->work_in_progress = true;
        irq_work_queue(&policy_dbs->irq_work);
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
                                                     struct dbs_governor *gov)
{
        struct policy_dbs_info *policy_dbs;
        int j;

        /* Allocate memory for per-policy governor data. */
        policy_dbs = gov->alloc();
        if (!policy_dbs)
                return NULL;

        policy_dbs->policy = policy;
        mutex_init(&policy_dbs->timer_mutex);
        atomic_set(&policy_dbs->work_count, 0);
        init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
        INIT_WORK(&policy_dbs->work, dbs_work_handler);

        /* Set policy_dbs for all CPUs, online+offline */
        for_each_cpu(j, policy->related_cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->policy_dbs = policy_dbs;
                j_cdbs->update_util.func = dbs_update_util_handler;
        }
        return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
                                 struct dbs_governor *gov)
{
        int j;

        mutex_destroy(&policy_dbs->timer_mutex);

        for_each_cpu(j, policy_dbs->policy->related_cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->policy_dbs = NULL;
                j_cdbs->update_util.func = NULL;
        }
        gov->free(policy_dbs);
}
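
/*
 * cpufreq_governor_init - set up governor data for a policy.
 *
 * Allocate the per-policy data and either attach it to an already existing
 * dbs_data (when the governor tunables are system-wide) or create a new
 * dbs_data, initialize its tunables via gov->init() and expose them through
 * a new sysfs kobject.
 */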
static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct dbs_data *dbs_data;
        struct policy_dbs_info *policy_dbs;
        unsigned int latency;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        policy_dbs = alloc_policy_dbs_info(policy, gov);
        if (!policy_dbs)
                return -ENOMEM;

        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);

        dbs_data = gov->gdbs_data;
        if (dbs_data) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto free_policy_dbs_info;
                }
                policy_dbs->dbs_data = dbs_data;
                policy->governor_data = policy_dbs;

                mutex_lock(&dbs_data->mutex);
                dbs_data->usage_count++;
                list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
                mutex_unlock(&dbs_data->mutex);
                goto out;
        }

        dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
        if (!dbs_data) {
                ret = -ENOMEM;
                goto free_policy_dbs_info;
        }

        INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
        mutex_init(&dbs_data->mutex);

        ret = gov->init(dbs_data, !policy->governor->initialized);
        if (ret)
                goto free_policy_dbs_info;

        /* policy latency is in ns. Convert it to us first */
        latency = policy->cpuinfo.transition_latency / 1000;
        if (latency == 0)
                latency = 1;

        /* Bring kernel and HW constraints together */
        dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
                                          MIN_LATENCY_MULTIPLIER * latency);
        dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
                                      LATENCY_MULTIPLIER * latency);

        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;

        policy->governor_data = policy_dbs;

        policy_dbs->dbs_data = dbs_data;
        dbs_data->usage_count = 1;
        list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

        gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
        ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
                                   get_governor_parent_kobj(policy),
                                   "%s", gov->gov.name);
        if (!ret)
                goto out;

        /* Failure, so roll back. */
        pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

        policy->governor_data = NULL;

        if (!have_governor_per_policy())
                gov->gdbs_data = NULL;
        gov->exit(dbs_data, !policy->governor->initialized);
        kfree(dbs_data);

free_policy_dbs_info:
        free_policy_dbs_info(policy_dbs, gov);

out:
        mutex_unlock(&gov_dbs_data_mutex);
        return ret;
}

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        int count;

        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);

        mutex_lock(&dbs_data->mutex);
        list_del(&policy_dbs->list);
        count = --dbs_data->usage_count;
        mutex_unlock(&dbs_data->mutex);

        if (!count) {
                kobject_put(&dbs_data->kobj);

                policy->governor_data = NULL;

                if (!have_governor_per_policy())
                        gov->gdbs_data = NULL;

                gov->exit(dbs_data, policy->governor->initialized == 1);
                mutex_destroy(&dbs_data->mutex);
                kfree(dbs_data);
        } else {
                policy->governor_data = NULL;
        }

        free_policy_dbs_info(policy_dbs, gov);

        mutex_unlock(&gov_dbs_data_mutex);
        return 0;
}
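
/*
 * cpufreq_governor_start - start sampling for a policy.
 *
 * Initialize the per-CPU baselines (prev_cpu_wall, prev_cpu_idle, prev_load
 * and, if needed, prev_cpu_nice), let the governor do its own start work and
 * finally install the utilization update hooks with the configured sampling
 * rate.
 */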
static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int sampling_rate, ignore_nice, j;
        unsigned int io_busy;

        if (!policy->cur)
                return -EINVAL;

        policy_dbs->is_shared = policy_is_shared(policy);
        policy_dbs->rate_mult = 1;

        sampling_rate = dbs_data->sampling_rate;
        ignore_nice = dbs_data->ignore_nice_load;
        io_busy = dbs_data->io_is_busy;

        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
                unsigned int prev_load;

                j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

                prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
                j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;

                if (ignore_nice)
                        j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }

        gov->start(policy);

        gov_set_update_util(policy_dbs, sampling_rate);
        return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
        gov_cancel_work(policy);
        return 0;
}
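
/*
 * cpufreq_governor_limits - apply new policy limits.
 *
 * Clamp the current frequency into the updated [policy->min, policy->max]
 * range and reset the sample delay so that the next utilization update
 * re-evaluates the load right away.
 */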
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;

        mutex_lock(&policy_dbs->timer_mutex);

        if (policy->max < policy->cur)
                __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
        else if (policy->min > policy->cur)
                __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

        gov_update_sample_delay(policy_dbs, 0);

        mutex_unlock(&policy_dbs->timer_mutex);

        return 0;
}
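
/*
 * cpufreq_governor_dbs - common governor entry point.
 *
 * Dispatch the CPUFREQ_GOV_* events from the cpufreq core to the handlers
 * above.  Everything except CPUFREQ_GOV_POLICY_INIT requires the policy to
 * have governor data already, otherwise -EINVAL is returned.
 */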
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
        if (event == CPUFREQ_GOV_POLICY_INIT) {
                return cpufreq_governor_init(policy);
        } else if (policy->governor_data) {
                switch (event) {
                case CPUFREQ_GOV_POLICY_EXIT:
                        return cpufreq_governor_exit(policy);
                case CPUFREQ_GOV_START:
                        return cpufreq_governor_start(policy);
                case CPUFREQ_GOV_STOP:
                        return cpufreq_governor_stop(policy);
                case CPUFREQ_GOV_LIMITS:
                        return cpufreq_governor_limits(policy);
                }
        }
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);