/* cpufreq.h */
  1. /*
  2. * linux/include/linux/cpufreq.h
  3. *
  4. * Copyright (C) 2001 Russell King
  5. * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #ifndef _LINUX_CPUFREQ_H
  12. #define _LINUX_CPUFREQ_H
  13. #include <linux/clk.h>
  14. #include <linux/cpumask.h>
  15. #include <linux/completion.h>
  16. #include <linux/kobject.h>
  17. #include <linux/notifier.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/sysfs.h>
/*********************************************************************
 *                     CPUFREQ INTERFACE                             *
 *********************************************************************/
/*
 * Frequency values here are CPU kHz
 *
 * Maximum transition latency is in nanoseconds - if it's unknown,
 * CPUFREQ_ETERNAL shall be used.
 */
#define CPUFREQ_ETERNAL			(-1)
#define CPUFREQ_NAME_LEN		16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN		(CPUFREQ_NAME_LEN + 1)

struct cpufreq_governor;

/* How the entries of a driver's frequency table are ordered, if at all. */
enum cpufreq_table_sorting {
	CPUFREQ_TABLE_UNSORTED,
	CPUFREQ_TABLE_SORTED_ASCENDING,
	CPUFREQ_TABLE_SORTED_DESCENDING
};
/* Payload passed to frequency-transition notifiers (PRE/POSTCHANGE). */
struct cpufreq_freqs {
	unsigned int cpu;	/* cpu nr */
	unsigned int old;	/* previous frequency, kHz */
	unsigned int new;	/* frequency being switched to, kHz */
	u8 flags;		/* flags of cpufreq_driver, see below. */
};
/* Hardware limits of a CPU, filled in by the driver at init time. */
struct cpufreq_cpuinfo {
	unsigned int		max_freq;	/* in kHz */
	unsigned int		min_freq;	/* in kHz */

	/* in 10^(-9) s = nanoseconds */
	unsigned int		transition_latency;
};
/* User-requested frequency bounds (e.g. via sysfs scaling_min/max_freq). */
struct cpufreq_user_policy {
	unsigned int min;	/* in kHz */
	unsigned int max;	/* in kHz */
};
/*
 * One frequency domain: a set of CPUs that share a clock and are managed
 * together.  Lifetime is reference counted via cpufreq_cpu_get()/put().
 */
struct cpufreq_policy {
	/* CPUs sharing clock, require sw coordination */
	cpumask_var_t		cpus;		/* Online CPUs only */
	cpumask_var_t		related_cpus;	/* Online + Offline CPUs */
	cpumask_var_t		real_cpus;	/* Related and present */

	unsigned int		shared_type;	/* ACPI: ANY or ALL affected CPUs
						   should set cpufreq */
	unsigned int		cpu;		/* cpu managing this policy, must be online */

	struct clk		*clk;
	struct cpufreq_cpuinfo	cpuinfo;	/* hardware limits, see above */

	unsigned int		min;		/* in kHz */
	unsigned int		max;		/* in kHz */
	unsigned int		cur;		/* in kHz, only needed if cpufreq
						 * governors are used */
	unsigned int		restore_freq;	/* = policy->cur before transition */
	unsigned int		suspend_freq;	/* freq to set during suspend */

	unsigned int		policy;		/* CPUFREQ_POLICY_*, see below */
	unsigned int		last_policy;	/* policy before unplug */
	struct cpufreq_governor	*governor;	/* see below */
	void			*governor_data;
	char			last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

	struct work_struct	update;		/* if update_policy() needs to be
						 * called, but you're in IRQ context */

	struct cpufreq_user_policy user_policy;
	struct cpufreq_frequency_table	*freq_table;
	enum cpufreq_table_sorting freq_table_sorted;

	struct list_head        policy_list;
	struct kobject		kobj;
	struct completion	kobj_unregister;

	/*
	 * The rules for this semaphore:
	 * - Any routine that wants to read from the policy structure will
	 *   do a down_read on this semaphore.
	 * - Any routine that will write to the policy structure and/or may take away
	 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
	 *   mode before doing so.
	 */
	struct rw_semaphore	rwsem;

	/*
	 * Fast switch flags:
	 * - fast_switch_possible should be set by the driver if it can
	 *   guarantee that frequency can be changed on any CPU sharing the
	 *   policy and that the change will affect all of the policy CPUs then.
	 * - fast_switch_enabled is to be set by governors that support fast
	 *   frequency switching with the help of cpufreq_enable_fast_switch().
	 */
	bool			fast_switch_possible;
	bool			fast_switch_enabled;

	/*
	 * Preferred average time interval between consecutive invocations of
	 * the driver to set the frequency for this policy.  To be set by the
	 * scaling driver (0, which is the default, means no preference).
	 */
	unsigned int		transition_delay_us;

	/*
	 * Remote DVFS flag (Not added to the driver structure as we don't want
	 * to access another structure from scheduler hotpath).
	 *
	 * Should be set if CPUs can do DVFS on behalf of other CPUs from
	 * different cpufreq policies.
	 */
	bool			dvfs_possible_from_any_cpu;

	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
	unsigned int cached_target_freq;
	int cached_resolved_idx;

	/* Synchronization for frequency transitions */
	bool			transition_ongoing; /* Tracks transition status */
	spinlock_t		transition_lock;
	wait_queue_head_t	transition_wait;
	struct task_struct	*transition_task; /* Task which is doing the transition */

	/* cpufreq-stats */
	struct cpufreq_stats	*stats;

	/* For cpufreq driver's internal use */
	void			*driver_data;
};
/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW	 (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL	 (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU */

#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
/* Stubs so callers compile when cpufreq is configured out. */
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	return NULL;
}
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
  150. static inline bool policy_is_shared(struct cpufreq_policy *policy)
  151. {
  152. return cpumask_weight(policy->cpus) > 1;
  153. }
/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
extern struct kobject *cpufreq_global_kobject;

#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
void disable_cpufreq(void);

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
void cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
#else
/* Stubs: report 0 kHz / do nothing when cpufreq is configured out. */
static inline unsigned int cpufreq_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	return 0;
}
static inline void disable_cpufreq(void) { }
#endif

#ifdef CONFIG_CPU_FREQ_STAT
void cpufreq_stats_create_table(struct cpufreq_policy *policy);
void cpufreq_stats_free_table(struct cpufreq_policy *policy);
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq);
#else
static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
						   unsigned int new_freq) { }
#endif /* CONFIG_CPU_FREQ_STAT */
/*********************************************************************
 *                  CPUFREQ DRIVER INTERFACE                         *
 *********************************************************************/

#define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2  /* closest frequency to target */

/* Per-policy sysfs attribute; show/store receive the owning policy. */
struct freq_attr {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_policy *, char *);
	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};

#define cpufreq_freq_attr_ro(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
static struct freq_attr _name =			\
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0644, show_##_name, store_##_name)

#define cpufreq_freq_attr_wo(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0200, NULL, store_##_name)

/* Global (not per-policy) sysfs attribute under cpufreq_global_kobject. */
struct global_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *a, struct attribute *b,
			 const char *c, size_t count);
};

#define define_one_global_ro(_name)		\
static struct global_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_global_rw(_name)		\
static struct global_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)
/* Scaling-driver callbacks and capabilities, registered with the core. */
struct cpufreq_driver {
	char		name[CPUFREQ_NAME_LEN];
	u8		flags;
	void		*driver_data;

	/* needed by all drivers */
	int		(*init)(struct cpufreq_policy *policy);
	int		(*verify)(struct cpufreq_policy *policy);

	/* define one out of two */
	int		(*setpolicy)(struct cpufreq_policy *policy);

	/*
	 * On failure, should always restore frequency to policy->restore_freq
	 * (i.e. old freq).
	 */
	int		(*target)(struct cpufreq_policy *policy,
				  unsigned int target_freq,
				  unsigned int relation);	/* Deprecated */
	int		(*target_index)(struct cpufreq_policy *policy,
					unsigned int index);
	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
				       unsigned int target_freq);

	/*
	 * Caches and returns the lowest driver-supported frequency greater than
	 * or equal to the target frequency, subject to any driver limitations.
	 * Does not set the frequency. Only to be implemented for drivers with
	 * target().
	 */
	unsigned int	(*resolve_freq)(struct cpufreq_policy *policy,
					unsigned int target_freq);

	/*
	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
	 * unset.
	 *
	 * get_intermediate should return a stable intermediate frequency
	 * platform wants to switch to and target_intermediate() should set CPU
	 * to that frequency, before jumping to the frequency corresponding
	 * to 'index'. Core will take care of sending notifications and driver
	 * doesn't have to handle them in target_intermediate() or
	 * target_index().
	 *
	 * Drivers can return '0' from get_intermediate() in case they don't
	 * wish to switch to intermediate frequency for some target frequency.
	 * In that case core will directly call ->target_index().
	 */
	unsigned int	(*get_intermediate)(struct cpufreq_policy *policy,
					    unsigned int index);
	int		(*target_intermediate)(struct cpufreq_policy *policy,
					       unsigned int index);

	/* should be defined, if possible */
	unsigned int	(*get)(unsigned int cpu);

	/* optional */
	int		(*bios_limit)(int cpu, unsigned int *limit);

	int		(*exit)(struct cpufreq_policy *policy);
	void		(*stop_cpu)(struct cpufreq_policy *policy);
	int		(*suspend)(struct cpufreq_policy *policy);
	int		(*resume)(struct cpufreq_policy *policy);

	/* Will be called after the driver is fully initialized */
	void		(*ready)(struct cpufreq_policy *policy);

	struct freq_attr **attr;

	/* platform specific boost support code */
	bool		boost_enabled;
	int		(*set_boost)(int state);
};
/* flags (stored in cpufreq_driver.flags) */
#define CPUFREQ_STICKY		(1 << 0)	/* driver isn't removed even if
						   all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS	(1 << 1)	/* loops_per_jiffy or other
						   kernel "constants" aren't
						   affected by frequency
						   transitions */
#define CPUFREQ_PM_NO_WARN	(1 << 2)	/* don't warn on suspend/resume
						   speed mismatches */

/*
 * This should be set by platforms having multiple clock-domains, i.e.
 * supporting multiple policies. With this sysfs directories of governor would
 * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
 * governor with different tunables for different clusters.
 */
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)

/*
 * Driver will do POSTCHANGE notifications from outside of their ->target()
 * routine and so must set cpufreq_driver->flags with this flag, so that core
 * can handle them specially.
 */
#define CPUFREQ_ASYNC_NOTIFICATION  (1 << 4)

/*
 * Set by drivers which want cpufreq core to check if CPU is running at a
 * frequency present in freq-table exposed by the driver. For these drivers if
 * CPU is found running at an out of table freq, we will try to set it to a freq
 * from the table. And if that fails, we will stop further boot process by
 * issuing a BUG_ON().
 */
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK	(1 << 5)

/*
 * Set by drivers to disallow use of governors with "dynamic_switching" flag
 * set.
 */
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING (1 << 6)

int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);

const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);
  331. static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
  332. unsigned int min, unsigned int max)
  333. {
  334. if (policy->min < min)
  335. policy->min = min;
  336. if (policy->max < min)
  337. policy->max = min;
  338. if (policy->min > max)
  339. policy->min = max;
  340. if (policy->max > max)
  341. policy->max = max;
  342. if (policy->min > policy->max)
  343. policy->min = policy->max;
  344. return;
  345. }
/* Clamp policy->min/max to the hardware limits reported in ->cpuinfo. */
static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}

#ifdef CONFIG_CPU_FREQ
void cpufreq_suspend(void);
void cpufreq_resume(void);
int cpufreq_generic_suspend(struct cpufreq_policy *policy);
#else
static inline void cpufreq_suspend(void) {}
static inline void cpufreq_resume(void) {}
#endif
/*********************************************************************
 *                     CPUFREQ NOTIFIER INTERFACE                    *
 *********************************************************************/

/* Notifier list selectors for cpufreq_register_notifier(). */
#define CPUFREQ_TRANSITION_NOTIFIER	(0)
#define CPUFREQ_POLICY_NOTIFIER		(1)

/* Transition notifiers */
#define CPUFREQ_PRECHANGE		(0)
#define CPUFREQ_POSTCHANGE		(1)

/* Policy Notifiers */
#define CPUFREQ_ADJUST			(0)
#define CPUFREQ_NOTIFY			(1)

#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed);

#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
#endif /* !CONFIG_CPU_FREQ */

/**
 * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
 * safe)
 * @old: old value
 * @div: divisor
 * @mult: multiplier
 *
 * new = old * mult / div
 *
 * On 32-bit the product is computed in 64 bits and divided with do_div()
 * to avoid overflow; on 64-bit a plain widened multiply suffices.
 */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
		u_int mult)
{
#if BITS_PER_LONG == 32
	u64 result = ((u64) old) * ((u64) mult);
	do_div(result, div);
	return (unsigned long) result;
#elif BITS_PER_LONG == 64
	unsigned long result = old * ((u64) mult);
	result /= div;
	return result;
#endif
}
/*********************************************************************
 *                          CPUFREQ GOVERNORS                        *
 *********************************************************************/

/*
 * If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
 * two generic policies are available:
 */
#define CPUFREQ_POLICY_POWERSAVE	(1)
#define CPUFREQ_POLICY_PERFORMANCE	(2)

/*
 * The polling frequency depends on the capability of the processor. Default
 * polling frequency is 1000 times the transition latency of the processor. The
 * ondemand governor will work on any processor with transition latency <= 10ms,
 * using appropriate sampling rate.
 */
#define LATENCY_MULTIPLIER		(1000)

/* Governor callbacks, invoked by the core with policy->rwsem held. */
struct cpufreq_governor {
	char	name[CPUFREQ_NAME_LEN];
	int	(*init)(struct cpufreq_policy *policy);
	void	(*exit)(struct cpufreq_policy *policy);
	int	(*start)(struct cpufreq_policy *policy);
	void	(*stop)(struct cpufreq_policy *policy);
	void	(*limits)(struct cpufreq_policy *policy);
	ssize_t	(*show_setspeed)	(struct cpufreq_policy *policy,
					 char *buf);
	int	(*store_setspeed)	(struct cpufreq_policy *policy,
					 unsigned int freq);
	/* For governors which change frequency dynamically by themselves */
	bool	dynamic_switching;
	struct list_head	governor_list;
	struct module		*owner;
};
/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq);
int cpufreq_driver_target(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation);
/* Same as above, but caller already holds policy->rwsem. */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);

struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
  462. static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
  463. {
  464. if (policy->max < policy->cur)
  465. __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
  466. else if (policy->min > policy->cur)
  467. __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
  468. }
/* Governor attribute set: shared tunables for governors that keep one
 * kobject per tunable set (possibly shared across policies). */
struct gov_attr_set {
	struct kobject kobj;
	struct list_head policy_list;
	struct mutex update_lock;
	int usage_count;	/* number of policies attached */
};

/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;

void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);

/* Governor sysfs attribute; show/store receive the owning attr_set. */
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
			 size_t count);
};
  488. static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
  489. {
  490. /*
  491. * Allow remote callbacks if:
  492. * - dvfs_possible_from_any_cpu flag is set
  493. * - the local and remote CPUs share cpufreq policy
  494. */
  495. return policy->dvfs_possible_from_any_cpu ||
  496. cpumask_test_cpu(smp_processor_id(), policy->cpus);
  497. }
/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

/* Special Values of .frequency field */
#define CPUFREQ_ENTRY_INVALID	~0u
#define CPUFREQ_TABLE_END	~1u
/* Special Values of .flags field */
#define CPUFREQ_BOOST_FREQ	(1 << 0)

/* One row of a driver-provided frequency table, terminated by an entry
 * whose .frequency == CPUFREQ_TABLE_END. */
struct cpufreq_frequency_table {
	unsigned int	flags;
	unsigned int	driver_data; /* driver specific data, not used by core */
	unsigned int	frequency; /* kHz - doesn't need to be in ascending
				    * order */
};

#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table);
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table);
#else
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
						struct cpufreq_frequency_table
						**table)
{
	return -EINVAL;
}
static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
						 struct cpufreq_frequency_table
						 **table)
{
}
#endif
/*
 * cpufreq_for_each_entry -	iterate over a cpufreq_frequency_table
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */
#define cpufreq_for_each_entry(pos, table)	\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)

/*
 * cpufreq_for_each_entry_idx -	iterate over a cpufreq_frequency_table
 *	with index
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 * @idx:	the table entry currently being processed
 */
#define cpufreq_for_each_entry_idx(pos, table, idx)	\
	for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
		pos++, idx++)

/*
 * cpufreq_for_each_valid_entry -     iterate over a cpufreq_frequency_table
 *	excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos:        the cpufreq_frequency_table * to use as a loop cursor.
 * @table:      the cpufreq_frequency_table * to iterate over.
 */
#define cpufreq_for_each_valid_entry(pos, table)			\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)	\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else

/*
 * cpufreq_for_each_valid_entry_idx -     iterate with index over a cpufreq
 *	frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 * @idx:	the table entry currently being processed
 */
#define cpufreq_for_each_valid_entry_idx(pos, table, idx)		\
	cpufreq_for_each_entry_idx(pos, table, idx)			\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table);

int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);

/* Fallback index lookup for tables that are not sorted. */
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
		unsigned int freq);

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);

#ifdef CONFIG_CPU_FREQ
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);
  586. /* Find lowest freq at or above target in a table in ascending order */
  587. static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
  588. unsigned int target_freq)
  589. {
  590. struct cpufreq_frequency_table *table = policy->freq_table;
  591. struct cpufreq_frequency_table *pos;
  592. unsigned int freq;
  593. int idx, best = -1;
  594. cpufreq_for_each_valid_entry_idx(pos, table, idx) {
  595. freq = pos->frequency;
  596. if (freq >= target_freq)
  597. return idx;
  598. best = idx;
  599. }
  600. return best;
  601. }
  602. /* Find lowest freq at or above target in a table in descending order */
  603. static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
  604. unsigned int target_freq)
  605. {
  606. struct cpufreq_frequency_table *table = policy->freq_table;
  607. struct cpufreq_frequency_table *pos;
  608. unsigned int freq;
  609. int idx, best = -1;
  610. cpufreq_for_each_valid_entry_idx(pos, table, idx) {
  611. freq = pos->frequency;
  612. if (freq == target_freq)
  613. return idx;
  614. if (freq > target_freq) {
  615. best = idx;
  616. continue;
  617. }
  618. /* No freq found above target_freq */
  619. if (best == -1)
  620. return idx;
  621. return best;
  622. }
  623. return best;
  624. }
  625. /* Works only on sorted freq-tables */
  626. static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
  627. unsigned int target_freq)
  628. {
  629. target_freq = clamp_val(target_freq, policy->min, policy->max);
  630. if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
  631. return cpufreq_table_find_index_al(policy, target_freq);
  632. else
  633. return cpufreq_table_find_index_dl(policy, target_freq);
  634. }
  635. /* Find highest freq at or below target in a table in ascending order */
  636. static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
  637. unsigned int target_freq)
  638. {
  639. struct cpufreq_frequency_table *table = policy->freq_table;
  640. struct cpufreq_frequency_table *pos;
  641. unsigned int freq;
  642. int idx, best = -1;
  643. cpufreq_for_each_valid_entry_idx(pos, table, idx) {
  644. freq = pos->frequency;
  645. if (freq == target_freq)
  646. return idx;
  647. if (freq < target_freq) {
  648. best = idx;
  649. continue;
  650. }
  651. /* No freq found below target_freq */
  652. if (best == -1)
  653. return idx;
  654. return best;
  655. }
  656. return best;
  657. }
  658. /* Find highest freq at or below target in a table in descending order */
  659. static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
  660. unsigned int target_freq)
  661. {
  662. struct cpufreq_frequency_table *table = policy->freq_table;
  663. struct cpufreq_frequency_table *pos;
  664. unsigned int freq;
  665. int idx, best = -1;
  666. cpufreq_for_each_valid_entry_idx(pos, table, idx) {
  667. freq = pos->frequency;
  668. if (freq <= target_freq)
  669. return idx;
  670. best = idx;
  671. }
  672. return best;
  673. }
  674. /* Works only on sorted freq-tables */
  675. static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
  676. unsigned int target_freq)
  677. {
  678. target_freq = clamp_val(target_freq, policy->min, policy->max);
  679. if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
  680. return cpufreq_table_find_index_ah(policy, target_freq);
  681. else
  682. return cpufreq_table_find_index_dh(policy, target_freq);
  683. }
  684. /* Find closest freq to target in a table in ascending order */
  685. static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
  686. unsigned int target_freq)
  687. {
  688. struct cpufreq_frequency_table *table = policy->freq_table;
  689. struct cpufreq_frequency_table *pos;
  690. unsigned int freq;
  691. int idx, best = -1;
  692. cpufreq_for_each_valid_entry_idx(pos, table, idx) {
  693. freq = pos->frequency;
  694. if (freq == target_freq)
  695. return idx;
  696. if (freq < target_freq) {
  697. best = idx;
  698. continue;
  699. }
  700. /* No freq found below target_freq */
  701. if (best == -1)
  702. return idx;
  703. /* Choose the closest freq */
  704. if (target_freq - table[best].frequency > freq - target_freq)
  705. return idx;
  706. return best;
  707. }
  708. return best;
  709. }
  710. /* Find closest freq to target in a table in descending order */
  711. static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
  712. unsigned int target_freq)
  713. {
  714. struct cpufreq_frequency_table *table = policy->freq_table;
  715. struct cpufreq_frequency_table *pos;
  716. unsigned int freq;
  717. int idx, best = -1;
  718. cpufreq_for_each_valid_entry_idx(pos, table, idx) {
  719. freq = pos->frequency;
  720. if (freq == target_freq)
  721. return idx;
  722. if (freq > target_freq) {
  723. best = idx;
  724. continue;
  725. }
  726. /* No freq found above target_freq */
  727. if (best == -1)
  728. return idx;
  729. /* Choose the closest freq */
  730. if (table[best].frequency - target_freq > target_freq - freq)
  731. return idx;
  732. return best;
  733. }
  734. return best;
  735. }
  736. /* Works only on sorted freq-tables */
  737. static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
  738. unsigned int target_freq)
  739. {
  740. target_freq = clamp_val(target_freq, policy->min, policy->max);
  741. if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
  742. return cpufreq_table_find_index_ac(policy, target_freq);
  743. else
  744. return cpufreq_table_find_index_dc(policy, target_freq);
  745. }
  746. static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
  747. unsigned int target_freq,
  748. unsigned int relation)
  749. {
  750. if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
  751. return cpufreq_table_index_unsorted(policy, target_freq,
  752. relation);
  753. switch (relation) {
  754. case CPUFREQ_RELATION_L:
  755. return cpufreq_table_find_index_l(policy, target_freq);
  756. case CPUFREQ_RELATION_H:
  757. return cpufreq_table_find_index_h(policy, target_freq);
  758. case CPUFREQ_RELATION_C:
  759. return cpufreq_table_find_index_c(policy, target_freq);
  760. default:
  761. pr_err("%s: Invalid relation: %d\n", __func__, relation);
  762. return -EINVAL;
  763. }
  764. }
  765. static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
  766. {
  767. struct cpufreq_frequency_table *pos;
  768. int count = 0;
  769. if (unlikely(!policy->freq_table))
  770. return 0;
  771. cpufreq_for_each_valid_entry(pos, policy->freq_table)
  772. count++;
  773. return count;
  774. }
  775. #else
/*
 * Stub for the not-taken branch of the #if above (guard is outside this
 * chunk — presumably CPU_FREQ support disabled): toggling the boost state
 * is a no-op that reports success.
 */
static inline int cpufreq_boost_trigger_state(int state)
{
	return 0;
}
/* Stub for the not-taken #if branch: boost is always reported as disabled */
static inline int cpufreq_boost_enabled(void)
{
	return 0;
}
/* Stub for the not-taken #if branch: enabling boost support cannot succeed */
static inline int cpufreq_enable_boost_support(void)
{
	return -EINVAL;
}
/* Stub for the not-taken #if branch: no policy has boost frequencies */
static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
	return false;
}
  792. #endif
  793. extern void arch_freq_prepare_all(void);
  794. extern unsigned int arch_freq_get_on_cpu(int cpu);
  795. extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
  796. unsigned long max_freq);
  797. /* the following are really really optional */
  798. extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
  799. extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
  800. extern struct freq_attr *cpufreq_generic_attr[];
  801. int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
  802. struct cpufreq_frequency_table *table);
  803. int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
  804. unsigned int cpufreq_generic_get(unsigned int cpu);
  805. int cpufreq_generic_init(struct cpufreq_policy *policy,
  806. struct cpufreq_frequency_table *table,
  807. unsigned int transition_latency);
  808. #endif /* _LINUX_CPUFREQ_H */