/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD	(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD	(20)
#define DEF_FREQUENCY_STEP		(5)
#define DEF_SAMPLING_DOWN_FACTOR	(1)
#define MAX_SAMPLING_DOWN_FACTOR	(10)
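
/*
 * up_threshold and down_threshold are load percentages, freq_step is a
 * percentage of policy->max applied per frequency adjustment, and
 * sampling_down_factor stretches the interval between frequency-decrease
 * evaluations relative to the base sampling_rate.
 */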

static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
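
/*
 * The frequency step is freq_step percent of the policy's maximum
 * frequency; e.g. freq_step = 5 with policy->max = 2000000 kHz yields a
 * 100000 kHz (100 MHz) step.
 */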
static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
					   struct cpufreq_policy *policy)
{
	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_target == 0))
		freq_target = DEF_FREQUENCY_STEP;

	return freq_target;
}

/*
 * Every sampling_rate, we check whether the current idle time is less
 * than 20% (default); if so, we try to increase the frequency. Every
 * sampling_rate * sampling_down_factor, we check whether the current idle
 * time is more than 80% (default); if so, we try to decrease the
 * frequency.
 *
 * Frequency updates happen at minimum steps of 5% (default) of the
 * maximum frequency, so both increases and decreases are gradual.
 */
static void cs_check_cpu(int cpu, unsigned int load)
{
	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		return;

	/* Check for frequency increase */
	if (load > cs_tuners->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
			return;

		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);

		if (dbs_info->requested_freq > policy->max)
			dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
					CPUFREQ_RELATION_H);
		return;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
		return;
	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		unsigned int freq_target;
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			return;

		freq_target = get_freq_target(cs_tuners, policy);
		if (dbs_info->requested_freq > freq_target)
			dbs_info->requested_freq -= freq_target;
		else
			dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
					CPUFREQ_RELATION_L);
		return;
	}
}
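
/*
 * Periodic sampling callback: re-evaluate the load when a full update is
 * requested and return the delay (in jiffies) until the next sample.
 */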
static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
				 struct dbs_data *dbs_data, bool modify_all)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

	if (modify_all)
		dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);

	return delay_for_sampling_rate(cs_tuners->sampling_rate);
}
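
/*
 * Transition notifier: the governor tracks its own requested_freq rather
 * than trusting policy->cur, so resync it when an external frequency
 * change leaves it outside the currently valid [min, max] range.
 */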
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cs_cpu_dbs_info_s *dbs_info =
					&per_cpu(cs_cpu_dbs_info, freq->cpu);
	struct cpufreq_policy *policy;

	if (!dbs_info->enable)
		return 0;

	policy = dbs_info->cdbs.shared->policy;

	/*
	 * we only care if our internally tracked freq moves outside the
	 * 'valid' ranges of frequency available to us otherwise we do not
	 * change it
	 */
	if (dbs_info->requested_freq > policy->max
			|| dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}

static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

/************************** sysfs interface ************************/
static struct common_dbs_data cs_dbs_cdata;

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
					  const char *buf, size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	cs_tuners->sampling_down_factor = input;
	return count;
}
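
/*
 * sampling_rate is expressed in microseconds; values below the core's
 * min_sampling_rate floor are silently raised to that minimum.
 */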
static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
				   size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
				  size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	cs_tuners->up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
				    size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
			input >= cs_tuners->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
				      const char *buf, size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == cs_tuners->ignore_nice_load) /* nothing to do */
		return count;

	cs_tuners->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cs_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
					&dbs_info->cdbs.prev_cpu_wall, 0);
		if (cs_tuners->ignore_nice_load)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
			       size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}

show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&down_threshold_gov_sys.attr,
	&ignore_nice_load_gov_sys.attr,
	&freq_step_gov_sys.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "conservative",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&down_threshold_gov_pol.attr,
	&ignore_nice_load_gov_pol.attr,
	&freq_step_gov_pol.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "conservative",
};
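
/*
 * Example tuning from userspace (illustrative; paths assume sysfs is
 * mounted at /sys and the governor is exposed system-wide, otherwise the
 * same files appear under each policy's own cpufreq directory):
 *
 *   # echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *   # echo 30 > /sys/devices/system/cpu/cpufreq/conservative/down_threshold
 *   # echo 10 > /sys/devices/system/cpu/cpufreq/conservative/freq_step
 */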
/************************** sysfs end ************************/

static int cs_init(struct dbs_data *dbs_data, bool notify)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->freq_step = DEF_FREQUENCY_STEP;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);

	if (notify)
		cpufreq_register_notifier(&cs_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);

	return 0;
}

static void cs_exit(struct dbs_data *dbs_data, bool notify)
{
	if (notify)
		cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);

	kfree(dbs_data->tuners);
}
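
/*
 * Glue into the common dbs governor core: per-CPU accessors plus the
 * conservative-specific timer, load-check, init and exit callbacks.
 */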
define_get_cpu_dbs_routines(cs_cpu_dbs_info);

static struct common_dbs_data cs_dbs_cdata = {
	.governor = GOV_CONSERVATIVE,
	.attr_group_gov_sys = &cs_attr_group_gov_sys,
	.attr_group_gov_pol = &cs_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = cs_dbs_timer,
	.gov_check_cpu = cs_check_cpu,
	.init = cs_init,
	.exit = cs_exit,
	.mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex),
};

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cs_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");
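
/*
 * When conservative is the default governor it must be registered early
 * (fs_initcall) so it is available by the time cpufreq drivers probe;
 * otherwise normal module initialisation is sufficient.
 */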
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);