/*
 * linux/include/linux/cpufreq.h
 *
 * Copyright (C) 2001 Russell King
 *	     (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>

/*********************************************************************
 *                        CPUFREQ INTERFACE                          *
 *********************************************************************/
/*
 * Frequency values here are CPU kHz
 *
 * Maximum transition latency is in nanoseconds - if it's unknown,
 * CPUFREQ_ETERNAL shall be used.
 */

#define CPUFREQ_ETERNAL			(-1)
#define CPUFREQ_NAME_LEN		16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN		(CPUFREQ_NAME_LEN + 1)
struct cpufreq_governor;

struct cpufreq_freqs {
	unsigned int cpu;	/* cpu nr */
	unsigned int old;
	unsigned int new;
	u8 flags;		/* flags of cpufreq_driver, see below. */
};

struct cpufreq_cpuinfo {
	unsigned int		max_freq;
	unsigned int		min_freq;

	/* in 10^(-9) s = nanoseconds */
	unsigned int		transition_latency;
};

struct cpufreq_user_policy {
	unsigned int		min;	/* in kHz */
	unsigned int		max;	/* in kHz */
};

struct cpufreq_policy {
	/* CPUs sharing clock, require sw coordination */
	cpumask_var_t		cpus;	/* Online CPUs only */
	cpumask_var_t		related_cpus; /* Online + Offline CPUs */
	cpumask_var_t		real_cpus; /* Related and present */

	unsigned int		shared_type; /* ACPI: ANY or ALL affected CPUs
						should set cpufreq */
	unsigned int		cpu;	/* cpu managing this policy, must be online */

	struct clk		*clk;
	struct cpufreq_cpuinfo	cpuinfo; /* see above */

	unsigned int		min;	/* in kHz */
	unsigned int		max;	/* in kHz */
	unsigned int		cur;	/* in kHz, only needed if cpufreq
					 * governors are used */
	unsigned int		restore_freq; /* = policy->cur before transition */
	unsigned int		suspend_freq; /* freq to set during suspend */

	unsigned int		policy; /* see CPUFREQ_POLICY_* below */
	unsigned int		last_policy; /* policy before unplug */
	struct cpufreq_governor	*governor; /* see below */
	void			*governor_data;
	char			last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

	struct work_struct	update; /* if update_policy() needs to be
					 * called, but you're in IRQ context */

	struct cpufreq_user_policy user_policy;
	struct cpufreq_frequency_table	*freq_table;

	struct list_head	policy_list;
	struct kobject		kobj;
	struct completion	kobj_unregister;

	/*
	 * The rules for this semaphore:
	 * - Any routine that wants to read from the policy structure will
	 *   do a down_read on this semaphore.
	 * - Any routine that will write to the policy structure and/or may take away
	 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
	 *   mode before doing so.
	 */
	struct rw_semaphore	rwsem;

	/*
	 * Fast switch flags:
	 * - fast_switch_possible should be set by the driver if it can
	 *   guarantee that frequency can be changed on any CPU sharing the
	 *   policy and that the change will affect all of the policy CPUs then.
	 * - fast_switch_enabled is to be set by governors that support fast
	 *   frequency switching with the help of cpufreq_enable_fast_switch().
	 */
	bool			fast_switch_possible;
	bool			fast_switch_enabled;

	/* Synchronization for frequency transitions */
	bool			transition_ongoing; /* Tracks transition status */
	spinlock_t		transition_lock;
	wait_queue_head_t	transition_wait;
	struct task_struct	*transition_task; /* Task which is doing the transition */

	/* cpufreq-stats */
	struct cpufreq_stats	*stats;

	/* For cpufreq driver's internal use */
	void			*driver_data;
};

/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW	 (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL	 (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU*/

#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	return NULL;
}
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
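
/*
 * Illustrative sketch (not part of the API defined here): a typical
 * cpufreq_cpu_get()/cpufreq_cpu_put() pairing.  cpufreq_cpu_get() takes a
 * reference on the policy and may return NULL, so every successful get must
 * be balanced by a put.  The function name below is hypothetical.
 *
 *	static unsigned int example_read_cur_freq(unsigned int cpu)
 *	{
 *		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *		unsigned int cur = 0;
 *
 *		if (policy) {
 *			cur = policy->cur;
 *			cpufreq_cpu_put(policy);
 *		}
 *		return cur;
 *	}
 */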
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
	return cpumask_weight(policy->cpus) > 1;
}

/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
extern struct kobject *cpufreq_global_kobject;

#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
void disable_cpufreq(void);

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	return 0;
}
static inline void disable_cpufreq(void) { }
#endif

#ifdef CONFIG_CPU_FREQ_STAT
void cpufreq_stats_create_table(struct cpufreq_policy *policy);
void cpufreq_stats_free_table(struct cpufreq_policy *policy);
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq);
#else
static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
						   unsigned int new_freq) { }
#endif /* CONFIG_CPU_FREQ_STAT */
/*********************************************************************
 *                    CPUFREQ DRIVER INTERFACE                       *
 *********************************************************************/

#define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2  /* closest frequency to target */

struct freq_attr {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_policy *, char *);
	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};

#define cpufreq_freq_attr_ro(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
static struct freq_attr _name =			\
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0644, show_##_name, store_##_name)

struct global_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *a, struct attribute *b,
			 const char *c, size_t count);
};

#define define_one_global_ro(_name)		\
static struct global_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_global_rw(_name)		\
static struct global_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)
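
/*
 * Illustrative sketch: cpufreq_freq_attr_ro(foo) expands to a read-only
 * struct freq_attr named 'foo' whose ->show is wired to show_foo(), so a
 * matching show_<name>() must already be defined.  'foo' is a hypothetical
 * attribute name.
 *
 *	static ssize_t show_foo(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 *	cpufreq_freq_attr_ro(foo);
 */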
struct cpufreq_driver {
	char		name[CPUFREQ_NAME_LEN];
	u8		flags;
	void		*driver_data;

	/* needed by all drivers */
	int		(*init)(struct cpufreq_policy *policy);
	int		(*verify)(struct cpufreq_policy *policy);

	/* define one out of two */
	int		(*setpolicy)(struct cpufreq_policy *policy);

	/*
	 * On failure, should always restore frequency to policy->restore_freq
	 * (i.e. old freq).
	 */
	int		(*target)(struct cpufreq_policy *policy,
				  unsigned int target_freq,
				  unsigned int relation);	/* Deprecated */
	int		(*target_index)(struct cpufreq_policy *policy,
					unsigned int index);
	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
				       unsigned int target_freq);
	/*
	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
	 * unset.
	 *
	 * get_intermediate should return a stable intermediate frequency
	 * platform wants to switch to and target_intermediate() should set CPU
	 * to that frequency, before jumping to the frequency corresponding
	 * to 'index'. Core will take care of sending notifications and driver
	 * doesn't have to handle them in target_intermediate() or
	 * target_index().
	 *
	 * Drivers can return '0' from get_intermediate() in case they don't
	 * wish to switch to intermediate frequency for some target frequency.
	 * In that case core will directly call ->target_index().
	 */
	unsigned int	(*get_intermediate)(struct cpufreq_policy *policy,
					    unsigned int index);
	int		(*target_intermediate)(struct cpufreq_policy *policy,
					       unsigned int index);

	/* should be defined, if possible */
	unsigned int	(*get)(unsigned int cpu);

	/* optional */
	int		(*bios_limit)(int cpu, unsigned int *limit);

	int		(*exit)(struct cpufreq_policy *policy);
	void		(*stop_cpu)(struct cpufreq_policy *policy);
	int		(*suspend)(struct cpufreq_policy *policy);
	int		(*resume)(struct cpufreq_policy *policy);

	/* Will be called after the driver is fully initialized */
	void		(*ready)(struct cpufreq_policy *policy);

	struct freq_attr **attr;

	/* platform specific boost support code */
	bool		boost_enabled;
	int		(*set_boost)(int state);
};

/* flags */
#define CPUFREQ_STICKY		(1 << 0)	/* driver isn't removed even if
						   all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS	(1 << 1)	/* loops_per_jiffy or other
						   kernel "constants" aren't
						   affected by frequency
						   transitions */
#define CPUFREQ_PM_NO_WARN	(1 << 2)	/* don't warn on suspend/resume
						   speed mismatches */

/*
 * This should be set by platforms having multiple clock-domains, i.e.
 * supporting multiple policies. With this, per-policy sysfs directories for
 * the governor are created in the cpu/cpu<num>/cpufreq/ directory, so the
 * same governor can be used with different tunables for different clusters.
 */
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)

/*
 * Drivers that send POSTCHANGE notifications from outside of their ->target()
 * routine must set this flag in cpufreq_driver->flags, so that the core can
 * handle those notifications specially.
 */
#define CPUFREQ_ASYNC_NOTIFICATION	(1 << 4)

/*
 * Set by drivers which want the cpufreq core to check whether the CPU is
 * running at a frequency present in the freq-table exposed by the driver. For
 * these drivers, if the CPU is found running at an out-of-table frequency, the
 * core will try to set it to a frequency from the table, and if that fails it
 * will stop the boot process by issuing a BUG_ON().
 */
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK	(1 << 5)
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);
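
/*
 * Illustrative sketch of registering a minimal ->target_index() driver.
 * All 'example_*' names are hypothetical; a real driver also implements
 * ->init() and ->target_index(), and usually ->get().
 *
 *	static struct cpufreq_driver example_cpufreq_driver = {
 *		.name		= "example",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.init		= example_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	static int __init example_cpufreq_register(void)
 *	{
 *		return cpufreq_register_driver(&example_cpufreq_driver);
 *	}
 */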
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
		unsigned int min, unsigned int max)
{
	if (policy->min < min)
		policy->min = min;
	if (policy->max < min)
		policy->max = min;
	if (policy->min > max)
		policy->min = max;
	if (policy->max > max)
		policy->max = max;
	if (policy->min > policy->max)
		policy->min = policy->max;
	return;
}

static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
			policy->cpuinfo.max_freq);
}
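
/*
 * Illustrative sketch: a driver's ->verify() callback usually just clamps the
 * requested policy to the hardware limits recorded in ->cpuinfo, which is
 * exactly what cpufreq_verify_within_cpu_limits() does (table-based drivers
 * can use cpufreq_generic_frequency_table_verify() instead).  The function
 * name below is hypothetical.
 *
 *	static int example_cpufreq_verify(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_verify_within_cpu_limits(policy);
 *		return 0;
 *	}
 */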
#ifdef CONFIG_CPU_FREQ
void cpufreq_suspend(void);
void cpufreq_resume(void);
int cpufreq_generic_suspend(struct cpufreq_policy *policy);
#else
static inline void cpufreq_suspend(void) {}
static inline void cpufreq_resume(void) {}
#endif

/*********************************************************************
 *                   CPUFREQ NOTIFIER INTERFACE                      *
 *********************************************************************/

#define CPUFREQ_TRANSITION_NOTIFIER	(0)
#define CPUFREQ_POLICY_NOTIFIER		(1)

/* Transition notifiers */
#define CPUFREQ_PRECHANGE		(0)
#define CPUFREQ_POSTCHANGE		(1)

/* Policy Notifiers */
#define CPUFREQ_ADJUST			(0)
#define CPUFREQ_NOTIFY			(1)
#define CPUFREQ_START			(2)
#define CPUFREQ_CREATE_POLICY		(3)
#define CPUFREQ_REMOVE_POLICY		(4)

#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed);
#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
#endif /* !CONFIG_CPU_FREQ */
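
/*
 * Illustrative sketch: registering a transition notifier.  The callback is
 * invoked with CPUFREQ_PRECHANGE before and CPUFREQ_POSTCHANGE after a
 * frequency change, with a struct cpufreq_freqs * as data.  All 'example_*'
 * names are hypothetical.
 *
 *	static int example_freq_notify(struct notifier_block *nb,
 *				       unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u: %u -> %u kHz\n", freqs->cpu,
 *				 freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_freq_nb = {
 *		.notifier_call = example_freq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&example_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */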

/**
 * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
 * safe)
 * @old:  old value
 * @div:  divisor
 * @mult: multiplier
 *
 *    new = old * mult / div
 */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
		u_int mult)
{
#if BITS_PER_LONG == 32
	u64 result = ((u64) old) * ((u64) mult);
	do_div(result, div);
	return (unsigned long) result;
#elif BITS_PER_LONG == 64
	unsigned long result = old * ((u64) mult);
	result /= div;
	return result;
#endif
}
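
/*
 * Illustrative sketch: cpufreq_scale() is typically used to rescale a
 * frequency-dependent quantity such as loops_per_jiffy across a transition,
 * e.g. new_lpj = cpufreq_scale(old_lpj, freqs->old, freqs->new), which
 * computes old_lpj * new / old without overflowing on 32-bit architectures.
 */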

/*********************************************************************
 *                      CPUFREQ GOVERNORS                            *
 *********************************************************************/

/*
 * If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
 * two generic policies are available:
 */
#define CPUFREQ_POLICY_POWERSAVE	(1)
#define CPUFREQ_POLICY_PERFORMANCE	(2)

/*
 * The polling frequency depends on the capability of the processor. Default
 * polling frequency is 1000 times the transition latency of the processor. The
 * ondemand governor will work on any processor with transition latency <= 10ms,
 * using appropriate sampling rate.
 *
 * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
 * the ondemand governor will not work. All times here are in us (microseconds).
 */
#define MIN_SAMPLING_RATE_RATIO		(2)
#define LATENCY_MULTIPLIER		(1000)
#define MIN_LATENCY_MULTIPLIER		(20)
#define TRANSITION_LATENCY_LIMIT	(10 * 1000 * 1000)

struct cpufreq_governor {
	char	name[CPUFREQ_NAME_LEN];
	int	(*init)(struct cpufreq_policy *policy);
	void	(*exit)(struct cpufreq_policy *policy);
	int	(*start)(struct cpufreq_policy *policy);
	void	(*stop)(struct cpufreq_policy *policy);
	void	(*limits)(struct cpufreq_policy *policy);
	ssize_t	(*show_setspeed)(struct cpufreq_policy *policy,
				 char *buf);
	int	(*store_setspeed)(struct cpufreq_policy *policy,
				  unsigned int freq);
	unsigned int max_transition_latency; /* HW must be able to switch to
						next freq faster than this
						value in nano secs or we will
						fall back to the performance
						governor */
	struct list_head	governor_list;
	struct module		*owner;
};

/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation);
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);

struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
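
/*
 * Illustrative sketch of a minimal governor.  ->start()/->stop() bracket the
 * period during which the governor may call __cpufreq_driver_target(), and
 * ->limits() is invoked when policy->min/max change.  All 'example_*' names
 * are hypothetical, and a realistic governor implements more callbacks.
 *
 *	static void example_gov_limits(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_policy_apply_limits(policy);
 *	}
 *
 *	static struct cpufreq_governor example_governor = {
 *		.name	= "example",
 *		.start	= example_gov_start,
 *		.stop	= example_gov_stop,
 *		.limits	= example_gov_limits,
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_gov_register(void)
 *	{
 *		return cpufreq_register_governor(&example_governor);
 *	}
 */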

static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}

/* Governor attribute set */
struct gov_attr_set {
	struct kobject kobj;
	struct list_head policy_list;
	struct mutex update_lock;
	int usage_count;
};

/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;

void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);

/* Governor sysfs attribute */
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
			 size_t count);
};
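
/*
 * Illustrative sketch: a governor tunable exposed through governor_sysfs_ops.
 * The governor embeds a struct gov_attr_set in its own tunables object and
 * recovers it with container_of() in the show/store callbacks.  The
 * 'example_tunables' type and the 'rate_limit_us' tunable are hypothetical.
 *
 *	struct example_tunables {
 *		struct gov_attr_set attr_set;
 *		unsigned int rate_limit_us;
 *	};
 *
 *	static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
 *	{
 *		struct example_tunables *t =
 *			container_of(attr_set, struct example_tunables, attr_set);
 *
 *		return sprintf(buf, "%u\n", t->rate_limit_us);
 *	}
 *
 *	static struct governor_attr rate_limit_us = __ATTR_RO(rate_limit_us);
 */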

/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

/* Special Values of .frequency field */
#define CPUFREQ_ENTRY_INVALID	~0u
#define CPUFREQ_TABLE_END	~1u
/* Special Values of .flags field */
#define CPUFREQ_BOOST_FREQ	(1 << 0)

struct cpufreq_frequency_table {
	unsigned int	flags;
	unsigned int	driver_data; /* driver specific data, not used by core */
	unsigned int	frequency; /* kHz - doesn't need to be in ascending
				    * order */
};

#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table);
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table);
#else
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
						struct cpufreq_frequency_table
						**table)
{
	return -EINVAL;
}
static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
						 struct cpufreq_frequency_table
						 **table)
{
}
#endif

/*
 * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */
#define cpufreq_for_each_entry(pos, table)	\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)

/*
 * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
 *	excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */
#define cpufreq_for_each_valid_entry(pos, table)			\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)	\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else
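
/*
 * Illustrative sketch: finding the highest valid frequency in a table with
 * cpufreq_for_each_valid_entry(); CPUFREQ_ENTRY_INVALID rows are skipped
 * automatically.
 *
 *	struct cpufreq_frequency_table *pos;
 *	unsigned int max_freq = 0;
 *
 *	cpufreq_for_each_valid_entry(pos, policy->freq_table)
 *		if (pos->frequency > max_freq)
 *			max_freq = pos->frequency;
 */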

int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table);
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);

int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
				      unsigned int freq);

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);

#ifdef CONFIG_CPU_FREQ
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);
#else
static inline int cpufreq_boost_trigger_state(int state)
{
	return 0;
}
static inline int cpufreq_boost_enabled(void)
{
	return 0;
}
static inline int cpufreq_enable_boost_support(void)
{
	return -EINVAL;
}
static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
	return false;
}
#endif

/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table);

unsigned int cpufreq_generic_get(unsigned int cpu);
int cpufreq_generic_init(struct cpufreq_policy *policy,
			 struct cpufreq_frequency_table *table,
			 unsigned int transition_latency);
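
/*
 * Illustrative sketch: a table-based driver's ->init() often builds its
 * frequency table from OPPs and then hands everything to
 * cpufreq_generic_init(), which validates the table and records the
 * transition latency in policy->cpuinfo.  The device lookup and the
 * 100000 ns latency value below are hypothetical.
 *
 *	static int example_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		struct device *cpu_dev = get_cpu_device(policy->cpu);
 *		struct cpufreq_frequency_table *table;
 *		int ret;
 *
 *		ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &table);
 *		if (ret)
 *			return ret;
 *
 *		return cpufreq_generic_init(policy, table, 100000);
 *	}
 */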

#endif /* _LINUX_CPUFREQ_H */