intel_pstate.c

  1. /*
  2. * intel_pstate.c: Native P state management for Intel processors
  3. *
  4. * (C) Copyright 2012 Intel Corporation
  5. * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; version 2
  10. * of the License.
  11. */
  12. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13. #include <linux/kernel.h>
  14. #include <linux/kernel_stat.h>
  15. #include <linux/module.h>
  16. #include <linux/ktime.h>
  17. #include <linux/hrtimer.h>
  18. #include <linux/tick.h>
  19. #include <linux/slab.h>
  20. #include <linux/sched/cpufreq.h>
  21. #include <linux/list.h>
  22. #include <linux/cpu.h>
  23. #include <linux/cpufreq.h>
  24. #include <linux/sysfs.h>
  25. #include <linux/types.h>
  26. #include <linux/fs.h>
  27. #include <linux/debugfs.h>
  28. #include <linux/acpi.h>
  29. #include <linux/vmalloc.h>
  30. #include <trace/events/power.h>
  31. #include <asm/div64.h>
  32. #include <asm/msr.h>
  33. #include <asm/cpu_device_id.h>
  34. #include <asm/cpufeature.h>
  35. #include <asm/intel-family.h>
  36. #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
  37. #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
  38. #define INTEL_CPUFREQ_TRANSITION_DELAY 500
  39. #ifdef CONFIG_ACPI
  40. #include <acpi/processor.h>
  41. #include <acpi/cppc_acpi.h>
  42. #endif
  43. #define FRAC_BITS 8
  44. #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
  45. #define fp_toint(X) ((X) >> FRAC_BITS)
  46. #define EXT_BITS 6
  47. #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
  48. #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
  49. #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
  50. static inline int32_t mul_fp(int32_t x, int32_t y)
  51. {
  52. return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
  53. }
  54. static inline int32_t div_fp(s64 x, s64 y)
  55. {
  56. return div64_s64((int64_t)x << FRAC_BITS, y);
  57. }
  58. static inline int ceiling_fp(int32_t x)
  59. {
  60. int mask, ret;
  61. ret = fp_toint(x);
  62. mask = (1 << FRAC_BITS) - 1;
  63. if (x & mask)
  64. ret += 1;
  65. return ret;
  66. }
  67. static inline int32_t percent_fp(int percent)
  68. {
  69. return div_fp(percent, 100);
  70. }
  71. static inline u64 mul_ext_fp(u64 x, u64 y)
  72. {
  73. return (x * y) >> EXT_FRAC_BITS;
  74. }
  75. static inline u64 div_ext_fp(u64 x, u64 y)
  76. {
  77. return div64_u64(x << EXT_FRAC_BITS, y);
  78. }
  79. static inline int32_t percent_ext_fp(int percent)
  80. {
  81. return div_ext_fp(percent, 100);
  82. }
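/*
 * Worked example of the fixed-point helpers above (a sketch, following
 * directly from FRAC_BITS = 8, i.e. Q24.8, and EXT_FRAC_BITS = 14):
 *
 *   int_tofp(3)                      == 768    (3 << 8)
 *   fp_toint(768)                    == 3
 *   mul_fp(int_tofp(3), int_tofp(2)) == 1536   == int_tofp(6)
 *   div_fp(1, 4)                     == 64     (0.25 in Q24.8)
 *   percent_fp(75)                   == 192    (0.75 in Q24.8)
 *   ceiling_fp(192)                  == 1      (fractional bits set)
 *   div_ext_fp(3, 4)                 == 12288  (0.75 with 14 fraction bits)
 */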
  83. /**
  84. * struct sample - Store performance sample
  85. * @core_avg_perf: Ratio of APERF/MPERF which is the actual average
  86. * performance during last sample period
  87. * @busy_scaled: Scaled busy value which is used to calculate next
  88. * P state. This can be different than core_avg_perf
  89. * to account for cpu idle period
  90. * @aperf: Difference of actual performance frequency clock count
  91. * read from APERF MSR between last and current sample
  92. * @mperf: Difference of maximum performance frequency clock count
  93. * read from MPERF MSR between last and current sample
  94. * @tsc: Difference of time stamp counter between last and
  95. * current sample
  96. * @time: Current time from scheduler
  97. *
  98. * This structure is used in the cpudata structure to store performance sample
  99. * data for choosing next P State.
  100. */
  101. struct sample {
  102. int32_t core_avg_perf;
  103. int32_t busy_scaled;
  104. u64 aperf;
  105. u64 mperf;
  106. u64 tsc;
  107. u64 time;
  108. };
  109. /**
  110. * struct pstate_data - Store P state data
  111. * @current_pstate: Current requested P state
  112. * @min_pstate: Min P state possible for this platform
  113. * @max_pstate: Max P state possible for this platform
  114. * @max_pstate_physical: Physical max P-state for the processor.
  115. * This can be higher than max_pstate, which can be
  116. * limited by platform thermal design power limits.
  117. * @scaling: Scaling factor to convert frequency to cpufreq
  118. * frequency units
  119. * @turbo_pstate: Max Turbo P state possible for this platform
  120. * @max_freq: @max_pstate frequency in cpufreq units
  121. * @turbo_freq: @turbo_pstate frequency in cpufreq units
  122. *
  123. * Stores the per cpu model P state limits and current P state.
  124. */
  125. struct pstate_data {
  126. int current_pstate;
  127. int min_pstate;
  128. int max_pstate;
  129. int max_pstate_physical;
  130. int scaling;
  131. int turbo_pstate;
  132. unsigned int max_freq;
  133. unsigned int turbo_freq;
  134. };
  135. /**
  136. * struct vid_data - Stores voltage information data
  137. * @min: VID data for this platform corresponding to
  138. * the lowest P state
  139. * @max: VID data corresponding to the highest P State.
  140. * @turbo: VID data for turbo P state
  141. * @ratio: Ratio of (vid max - vid min) /
  142. * (max P state - Min P State)
  143. *
  144. * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
  145. * This data is used on Atom platforms, where in addition to the target P-state,
  146. * voltage data needs to be specified to select the next P-state.
  147. */
  148. struct vid_data {
  149. int min;
  150. int max;
  151. int turbo;
  152. int32_t ratio;
  153. };
  154. /**
  155. * struct global_params - Global parameters, mostly tunable via sysfs.
  156. * @no_turbo: Whether or not to use turbo P-states.
  157. * @turbo_disabled: Whether or not turbo P-states are available at all,
  158. * based on the MSR_IA32_MISC_ENABLE value and whether or
  159. * not the maximum reported turbo P-state is different from
  160. * the maximum reported non-turbo one.
  161. * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
  162. * P-state capacity.
  163. * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
  164. * P-state capacity.
  165. */
  166. struct global_params {
  167. bool no_turbo;
  168. bool turbo_disabled;
  169. int max_perf_pct;
  170. int min_perf_pct;
  171. };
  172. /**
  173. * struct cpudata - Per CPU instance data storage
  174. * @cpu: CPU number for this instance data
  175. * @policy: CPUFreq policy value
  176. * @update_util: CPUFreq utility callback information
  177. * @update_util_set: CPUFreq utility callback is set
  178. * @iowait_boost: iowait-related boost fraction
  179. * @last_update: Time of the last update.
  180. * @pstate: Stores P state limits for this CPU
  181. * @vid: Stores VID limits for this CPU
  182. * @last_sample_time: Last Sample time
  183. * @aperf_mperf_shift: Number of clock cycles after which APERF and MPERF
  184. * are incremented. This shift is applied to the mperf
  185. * delta when calculating CPU busy.
  186. * @prev_aperf: Last APERF value read from APERF MSR
  187. * @prev_mperf: Last MPERF value read from MPERF MSR
  188. * @prev_tsc: Last timestamp counter (TSC) value
  189. * @prev_cummulative_iowait: IO wait time difference between the last and
  190. * current sample
  191. * @sample: Storage for storing last Sample data
  192. * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
  193. * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
  194. * @acpi_perf_data: Stores ACPI perf information read from _PSS
  195. * @valid_pss_table: Set to true for valid ACPI _PSS entries found
  196. * @epp_powersave: Last saved HWP energy performance preference
  197. * (EPP) or energy performance bias (EPB),
  198. * when policy switched to performance
  199. * @epp_policy: Last saved policy used to set EPP/EPB
  200. * @epp_default: Power on default HWP energy performance
  201. * preference/bias
  202. * @epp_saved: Saved EPP/EPB during system suspend or CPU offline
  203. * operation
  204. *
  205. * This structure stores per CPU instance data for all CPUs.
  206. */
  207. struct cpudata {
  208. int cpu;
  209. unsigned int policy;
  210. struct update_util_data update_util;
  211. bool update_util_set;
  212. struct pstate_data pstate;
  213. struct vid_data vid;
  214. u64 last_update;
  215. u64 last_sample_time;
  216. u64 aperf_mperf_shift;
  217. u64 prev_aperf;
  218. u64 prev_mperf;
  219. u64 prev_tsc;
  220. u64 prev_cummulative_iowait;
  221. struct sample sample;
  222. int32_t min_perf_ratio;
  223. int32_t max_perf_ratio;
  224. #ifdef CONFIG_ACPI
  225. struct acpi_processor_performance acpi_perf_data;
  226. bool valid_pss_table;
  227. #endif
  228. unsigned int iowait_boost;
  229. s16 epp_powersave;
  230. s16 epp_policy;
  231. s16 epp_default;
  232. s16 epp_saved;
  233. };
  234. static struct cpudata **all_cpu_data;
  235. /**
  236. * struct pstate_funcs - Per CPU model specific callbacks
  237. * @get_max: Callback to get maximum non turbo effective P state
  238. * @get_max_physical: Callback to get maximum non turbo physical P state
  239. * @get_min: Callback to get minimum P state
  240. * @get_turbo: Callback to get turbo P state
  241. * @get_scaling: Callback to get frequency scaling factor
  242. * @get_val: Callback to convert P state to actual MSR write value
  243. * @get_vid: Callback to get VID data for Atom platforms
  244. *
  245. * Core and Atom CPU models have different ways to get P-state limits. This
  246. * structure is used to store those callbacks.
  247. */
  248. struct pstate_funcs {
  249. int (*get_max)(void);
  250. int (*get_max_physical)(void);
  251. int (*get_min)(void);
  252. int (*get_turbo)(void);
  253. int (*get_scaling)(void);
  254. int (*get_aperf_mperf_shift)(void);
  255. u64 (*get_val)(struct cpudata*, int pstate);
  256. void (*get_vid)(struct cpudata *);
  257. };
  258. static struct pstate_funcs pstate_funcs __read_mostly;
  259. static int hwp_active __read_mostly;
  260. static bool per_cpu_limits __read_mostly;
  261. static struct cpufreq_driver *intel_pstate_driver __read_mostly;
  262. #ifdef CONFIG_ACPI
  263. static bool acpi_ppc;
  264. #endif
  265. static struct global_params global;
  266. static DEFINE_MUTEX(intel_pstate_driver_lock);
  267. static DEFINE_MUTEX(intel_pstate_limits_lock);
  268. #ifdef CONFIG_ACPI
  269. static bool intel_pstate_get_ppc_enable_status(void)
  270. {
  271. if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
  272. acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
  273. return true;
  274. return acpi_ppc;
  275. }
  276. #ifdef CONFIG_ACPI_CPPC_LIB
  277. /* The work item is needed to avoid CPU hotplug locking issues */
  278. static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
  279. {
  280. sched_set_itmt_support();
  281. }
  282. static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
  283. static void intel_pstate_set_itmt_prio(int cpu)
  284. {
  285. struct cppc_perf_caps cppc_perf;
  286. static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
  287. int ret;
  288. ret = cppc_get_perf_caps(cpu, &cppc_perf);
  289. if (ret)
  290. return;
  291. /*
  292. * The priorities can be set regardless of whether or not
  293. * sched_set_itmt_support(true) has been called and it is valid to
  294. * update them at any time after it has been called.
  295. */
  296. sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
  297. if (max_highest_perf <= min_highest_perf) {
  298. if (cppc_perf.highest_perf > max_highest_perf)
  299. max_highest_perf = cppc_perf.highest_perf;
  300. if (cppc_perf.highest_perf < min_highest_perf)
  301. min_highest_perf = cppc_perf.highest_perf;
  302. if (max_highest_perf > min_highest_perf) {
  303. /*
  304. * This code can be run during CPU online under the
  305. * CPU hotplug locks, so sched_set_itmt_support()
  306. * cannot be called from here. Queue up a work item
  307. * to invoke it.
  308. */
  309. schedule_work(&sched_itmt_work);
  310. }
  311. }
  312. }
  313. #else
  314. static void intel_pstate_set_itmt_prio(int cpu)
  315. {
  316. }
  317. #endif
  318. static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
  319. {
  320. struct cpudata *cpu;
  321. int ret;
  322. int i;
  323. if (hwp_active) {
  324. intel_pstate_set_itmt_prio(policy->cpu);
  325. return;
  326. }
  327. if (!intel_pstate_get_ppc_enable_status())
  328. return;
  329. cpu = all_cpu_data[policy->cpu];
  330. ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
  331. policy->cpu);
  332. if (ret)
  333. return;
  334. /*
  335. * Check if the control value in _PSS is for PERF_CTL MSR, which should
  336. * guarantee that the states returned by it map to the states in our
  337. * list directly.
  338. */
  339. if (cpu->acpi_perf_data.control_register.space_id !=
  340. ACPI_ADR_SPACE_FIXED_HARDWARE)
  341. goto err;
  342. /*
  343. * If there is only one entry in _PSS, simply ignore it and continue
  344. * as usual without taking _PSS into account.
  345. */
  346. if (cpu->acpi_perf_data.state_count < 2)
  347. goto err;
  348. pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
  349. for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
  350. pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
  351. (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
  352. (u32) cpu->acpi_perf_data.states[i].core_frequency,
  353. (u32) cpu->acpi_perf_data.states[i].power,
  354. (u32) cpu->acpi_perf_data.states[i].control);
  355. }
  356. /*
  357. * The _PSS table doesn't contain the whole turbo frequency range;
  358. * it just contains +1 MHz above the max non-turbo frequency,
  359. * with a control value corresponding to the max turbo ratio. But
  360. * when cpufreq's set_policy is called, it will be called with this
  361. * max frequency, which would cause reduced performance, as
  362. * this driver uses the real max turbo frequency as the max
  363. * frequency. So correct this frequency in the _PSS table to the
  364. * max turbo frequency based on the turbo state.
  365. * Also need to convert to MHz, as _PSS frequencies are in MHz.
  366. */
  367. if (!global.turbo_disabled)
  368. cpu->acpi_perf_data.states[0].core_frequency =
  369. policy->cpuinfo.max_freq / 1000;
  370. cpu->valid_pss_table = true;
  371. pr_debug("_PPC limits will be enforced\n");
  372. return;
  373. err:
  374. cpu->valid_pss_table = false;
  375. acpi_processor_unregister_performance(policy->cpu);
  376. }
  377. static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
  378. {
  379. struct cpudata *cpu;
  380. cpu = all_cpu_data[policy->cpu];
  381. if (!cpu->valid_pss_table)
  382. return;
  383. acpi_processor_unregister_performance(policy->cpu);
  384. }
  385. #else
  386. static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
  387. {
  388. }
  389. static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
  390. {
  391. }
  392. #endif
  393. static inline void update_turbo_state(void)
  394. {
  395. u64 misc_en;
  396. struct cpudata *cpu;
  397. cpu = all_cpu_data[0];
  398. rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
  399. global.turbo_disabled =
  400. (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
  401. cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
  402. }
  403. static int min_perf_pct_min(void)
  404. {
  405. struct cpudata *cpu = all_cpu_data[0];
  406. int turbo_pstate = cpu->pstate.turbo_pstate;
  407. return turbo_pstate ?
  408. (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
  409. }
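/*
 * Worked example (hypothetical limits): with min_pstate = 8 and
 * turbo_pstate = 32, the lowest settable min_perf_pct is
 * 8 * 100 / 32 = 25 percent.
 */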
  410. static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
  411. {
  412. u64 epb;
  413. int ret;
  414. if (!static_cpu_has(X86_FEATURE_EPB))
  415. return -ENXIO;
  416. ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
  417. if (ret)
  418. return (s16)ret;
  419. return (s16)(epb & 0x0f);
  420. }
  421. static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
  422. {
  423. s16 epp;
  424. if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
  425. /*
  426. * When hwp_req_data is 0, it means the caller didn't read
  427. * MSR_HWP_REQUEST, so read it here to get the EPP.
  428. */
  429. if (!hwp_req_data) {
  430. epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
  431. &hwp_req_data);
  432. if (epp)
  433. return epp;
  434. }
  435. epp = (hwp_req_data >> 24) & 0xff;
  436. } else {
  437. /* When there is no EPP present, HWP uses EPB settings */
  438. epp = intel_pstate_get_epb(cpu_data);
  439. }
  440. return epp;
  441. }
  442. static int intel_pstate_set_epb(int cpu, s16 pref)
  443. {
  444. u64 epb;
  445. int ret;
  446. if (!static_cpu_has(X86_FEATURE_EPB))
  447. return -ENXIO;
  448. ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
  449. if (ret)
  450. return ret;
  451. epb = (epb & ~0x0f) | pref;
  452. wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
  453. return 0;
  454. }
  455. /*
  456. * EPP/EPB display strings corresponding to EPP index in the
  457. * energy_perf_strings[]
  458. * index String
  459. *-------------------------------------
  460. * 0 default
  461. * 1 performance
  462. * 2 balance_performance
  463. * 3 balance_power
  464. * 4 power
  465. */
  466. static const char * const energy_perf_strings[] = {
  467. "default",
  468. "performance",
  469. "balance_performance",
  470. "balance_power",
  471. "power",
  472. NULL
  473. };
  474. static const unsigned int epp_values[] = {
  475. HWP_EPP_PERFORMANCE,
  476. HWP_EPP_BALANCE_PERFORMANCE,
  477. HWP_EPP_BALANCE_POWERSAVE,
  478. HWP_EPP_POWERSAVE
  479. };
  480. static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
  481. {
  482. s16 epp;
  483. int index = -EINVAL;
  484. epp = intel_pstate_get_epp(cpu_data, 0);
  485. if (epp < 0)
  486. return epp;
  487. if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
  488. if (epp == HWP_EPP_PERFORMANCE)
  489. return 1;
  490. if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
  491. return 2;
  492. if (epp <= HWP_EPP_BALANCE_POWERSAVE)
  493. return 3;
  494. else
  495. return 4;
  496. } else if (static_cpu_has(X86_FEATURE_EPB)) {
  497. /*
  498. * Range:
  499. * 0x00-0x03 : Performance
  500. * 0x04-0x07 : Balance performance
  501. * 0x08-0x0B : Balance power
  502. * 0x0C-0x0F : Power
  503. * EPB is a 4-bit value, but our ranges restrict the
  504. * values that can be set; effectively only the top
  505. * two bits are used here.
  506. */
  507. index = (epp >> 2) + 1;
  508. }
  509. return index;
  510. }
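/*
 * Worked example of the EPB mapping above (hypothetical reading): with
 * epp = 0x06, inside the 0x04-0x07 balance-performance range,
 * index = (0x06 >> 2) + 1 = 2, which selects "balance_performance"
 * in energy_perf_strings[].
 */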
  511. static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
  512. int pref_index)
  513. {
  514. int epp = -EINVAL;
  515. int ret;
  516. if (!pref_index)
  517. epp = cpu_data->epp_default;
  518. mutex_lock(&intel_pstate_limits_lock);
  519. if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
  520. u64 value;
  521. ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
  522. if (ret)
  523. goto return_pref;
  524. value &= ~GENMASK_ULL(31, 24);
  525. if (epp == -EINVAL)
  526. epp = epp_values[pref_index - 1];
  527. value |= (u64)epp << 24;
  528. ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
  529. } else {
  530. if (epp == -EINVAL)
  531. epp = (pref_index - 1) << 2;
  532. ret = intel_pstate_set_epb(cpu_data->cpu, epp);
  533. }
  534. return_pref:
  535. mutex_unlock(&intel_pstate_limits_lock);
  536. return ret;
  537. }
  538. static ssize_t show_energy_performance_available_preferences(
  539. struct cpufreq_policy *policy, char *buf)
  540. {
  541. int i = 0;
  542. int ret = 0;
  543. while (energy_perf_strings[i] != NULL)
  544. ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
  545. ret += sprintf(&buf[ret], "\n");
  546. return ret;
  547. }
  548. cpufreq_freq_attr_ro(energy_performance_available_preferences);
  549. static ssize_t store_energy_performance_preference(
  550. struct cpufreq_policy *policy, const char *buf, size_t count)
  551. {
  552. struct cpudata *cpu_data = all_cpu_data[policy->cpu];
  553. char str_preference[21];
  554. int ret, i = 0;
  555. ret = sscanf(buf, "%20s", str_preference);
  556. if (ret != 1)
  557. return -EINVAL;
  558. while (energy_perf_strings[i] != NULL) {
  559. if (!strcmp(str_preference, energy_perf_strings[i])) {
  560. intel_pstate_set_energy_pref_index(cpu_data, i);
  561. return count;
  562. }
  563. ++i;
  564. }
  565. return -EINVAL;
  566. }
  567. static ssize_t show_energy_performance_preference(
  568. struct cpufreq_policy *policy, char *buf)
  569. {
  570. struct cpudata *cpu_data = all_cpu_data[policy->cpu];
  571. int preference;
  572. preference = intel_pstate_get_energy_pref_index(cpu_data);
  573. if (preference < 0)
  574. return preference;
  575. return sprintf(buf, "%s\n", energy_perf_strings[preference]);
  576. }
  577. cpufreq_freq_attr_rw(energy_performance_preference);
  578. static struct freq_attr *hwp_cpufreq_attrs[] = {
  579. &energy_performance_preference,
  580. &energy_performance_available_preferences,
  581. NULL,
  582. };
  583. static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
  584. int *current_max)
  585. {
  586. u64 cap;
  587. rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
  588. if (global.no_turbo)
  589. *current_max = HWP_GUARANTEED_PERF(cap);
  590. else
  591. *current_max = HWP_HIGHEST_PERF(cap);
  592. *phy_max = HWP_HIGHEST_PERF(cap);
  593. }
  594. static void intel_pstate_hwp_set(unsigned int cpu)
  595. {
  596. struct cpudata *cpu_data = all_cpu_data[cpu];
  597. int max, min;
  598. u64 value;
  599. s16 epp;
  600. max = cpu_data->max_perf_ratio;
  601. min = cpu_data->min_perf_ratio;
  602. if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
  603. min = max;
  604. rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
  605. value &= ~HWP_MIN_PERF(~0L);
  606. value |= HWP_MIN_PERF(min);
  607. value &= ~HWP_MAX_PERF(~0L);
  608. value |= HWP_MAX_PERF(max);
  609. if (cpu_data->epp_policy == cpu_data->policy)
  610. goto skip_epp;
  611. cpu_data->epp_policy = cpu_data->policy;
  612. if (cpu_data->epp_saved >= 0) {
  613. epp = cpu_data->epp_saved;
  614. cpu_data->epp_saved = -EINVAL;
  615. goto update_epp;
  616. }
  617. if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
  618. epp = intel_pstate_get_epp(cpu_data, value);
  619. cpu_data->epp_powersave = epp;
  620. /* If the EPP read failed, don't try to write */
  621. if (epp < 0)
  622. goto skip_epp;
  623. epp = 0;
  624. } else {
  625. /* Skip setting EPP when the saved value is invalid */
  626. if (cpu_data->epp_powersave < 0)
  627. goto skip_epp;
  628. /*
  629. * No need to restore EPP when it is not zero. A
  630. * non-zero value means one of:
  631. * - the policy has not changed
  632. * - the user has changed it manually
  633. * - there was an error reading EPB
  634. */
  635. epp = intel_pstate_get_epp(cpu_data, value);
  636. if (epp)
  637. goto skip_epp;
  638. epp = cpu_data->epp_powersave;
  639. }
  640. update_epp:
  641. if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
  642. value &= ~GENMASK_ULL(31, 24);
  643. value |= (u64)epp << 24;
  644. } else {
  645. intel_pstate_set_epb(cpu, epp);
  646. }
  647. skip_epp:
  648. wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
  649. }
  650. static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
  651. {
  652. struct cpudata *cpu_data = all_cpu_data[policy->cpu];
  653. if (!hwp_active)
  654. return 0;
  655. cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);
  656. return 0;
  657. }
  658. static int intel_pstate_resume(struct cpufreq_policy *policy)
  659. {
  660. if (!hwp_active)
  661. return 0;
  662. mutex_lock(&intel_pstate_limits_lock);
  663. all_cpu_data[policy->cpu]->epp_policy = 0;
  664. intel_pstate_hwp_set(policy->cpu);
  665. mutex_unlock(&intel_pstate_limits_lock);
  666. return 0;
  667. }
  668. static void intel_pstate_update_policies(void)
  669. {
  670. int cpu;
  671. for_each_possible_cpu(cpu)
  672. cpufreq_update_policy(cpu);
  673. }
  674. /************************** sysfs begin ************************/
  675. #define show_one(file_name, object) \
  676. static ssize_t show_##file_name \
  677. (struct kobject *kobj, struct attribute *attr, char *buf) \
  678. { \
  679. return sprintf(buf, "%u\n", global.object); \
  680. }
  681. static ssize_t intel_pstate_show_status(char *buf);
  682. static int intel_pstate_update_status(const char *buf, size_t size);
  683. static ssize_t show_status(struct kobject *kobj,
  684. struct attribute *attr, char *buf)
  685. {
  686. ssize_t ret;
  687. mutex_lock(&intel_pstate_driver_lock);
  688. ret = intel_pstate_show_status(buf);
  689. mutex_unlock(&intel_pstate_driver_lock);
  690. return ret;
  691. }
  692. static ssize_t store_status(struct kobject *a, struct attribute *b,
  693. const char *buf, size_t count)
  694. {
  695. char *p = memchr(buf, '\n', count);
  696. int ret;
  697. mutex_lock(&intel_pstate_driver_lock);
  698. ret = intel_pstate_update_status(buf, p ? p - buf : count);
  699. mutex_unlock(&intel_pstate_driver_lock);
  700. return ret < 0 ? ret : count;
  701. }
  702. static ssize_t show_turbo_pct(struct kobject *kobj,
  703. struct attribute *attr, char *buf)
  704. {
  705. struct cpudata *cpu;
  706. int total, no_turbo, turbo_pct;
  707. uint32_t turbo_fp;
  708. mutex_lock(&intel_pstate_driver_lock);
  709. if (!intel_pstate_driver) {
  710. mutex_unlock(&intel_pstate_driver_lock);
  711. return -EAGAIN;
  712. }
  713. cpu = all_cpu_data[0];
  714. total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
  715. no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
  716. turbo_fp = div_fp(no_turbo, total);
  717. turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
  718. mutex_unlock(&intel_pstate_driver_lock);
  719. return sprintf(buf, "%u\n", turbo_pct);
  720. }
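/*
 * Worked example (hypothetical limits): with min_pstate = 1,
 * max_pstate = 24 and turbo_pstate = 32, total = 32 - 1 + 1 = 32
 * states, no_turbo = 24 - 1 + 1 = 24 states, so
 * turbo_pct = 100 - (24 * 100) / 32 = 25, i.e. a quarter of the
 * available P-states are turbo states.
 */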
  721. static ssize_t show_num_pstates(struct kobject *kobj,
  722. struct attribute *attr, char *buf)
  723. {
  724. struct cpudata *cpu;
  725. int total;
  726. mutex_lock(&intel_pstate_driver_lock);
  727. if (!intel_pstate_driver) {
  728. mutex_unlock(&intel_pstate_driver_lock);
  729. return -EAGAIN;
  730. }
  731. cpu = all_cpu_data[0];
  732. total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
  733. mutex_unlock(&intel_pstate_driver_lock);
  734. return sprintf(buf, "%u\n", total);
  735. }
  736. static ssize_t show_no_turbo(struct kobject *kobj,
  737. struct attribute *attr, char *buf)
  738. {
  739. ssize_t ret;
  740. mutex_lock(&intel_pstate_driver_lock);
  741. if (!intel_pstate_driver) {
  742. mutex_unlock(&intel_pstate_driver_lock);
  743. return -EAGAIN;
  744. }
  745. update_turbo_state();
  746. if (global.turbo_disabled)
  747. ret = sprintf(buf, "%u\n", global.turbo_disabled);
  748. else
  749. ret = sprintf(buf, "%u\n", global.no_turbo);
  750. mutex_unlock(&intel_pstate_driver_lock);
  751. return ret;
  752. }
  753. static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
  754. const char *buf, size_t count)
  755. {
  756. unsigned int input;
  757. int ret;
  758. ret = sscanf(buf, "%u", &input);
  759. if (ret != 1)
  760. return -EINVAL;
  761. mutex_lock(&intel_pstate_driver_lock);
  762. if (!intel_pstate_driver) {
  763. mutex_unlock(&intel_pstate_driver_lock);
  764. return -EAGAIN;
  765. }
  766. mutex_lock(&intel_pstate_limits_lock);
  767. update_turbo_state();
  768. if (global.turbo_disabled) {
  769. pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
  770. mutex_unlock(&intel_pstate_limits_lock);
  771. mutex_unlock(&intel_pstate_driver_lock);
  772. return -EPERM;
  773. }
  774. global.no_turbo = clamp_t(int, input, 0, 1);
  775. if (global.no_turbo) {
  776. struct cpudata *cpu = all_cpu_data[0];
  777. int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
  778. /* Squash the global minimum into the permitted range. */
  779. if (global.min_perf_pct > pct)
  780. global.min_perf_pct = pct;
  781. }
  782. mutex_unlock(&intel_pstate_limits_lock);
  783. intel_pstate_update_policies();
  784. mutex_unlock(&intel_pstate_driver_lock);
  785. return count;
  786. }
  787. static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
  788. const char *buf, size_t count)
  789. {
  790. unsigned int input;
  791. int ret;
  792. ret = sscanf(buf, "%u", &input);
  793. if (ret != 1)
  794. return -EINVAL;
  795. mutex_lock(&intel_pstate_driver_lock);
  796. if (!intel_pstate_driver) {
  797. mutex_unlock(&intel_pstate_driver_lock);
  798. return -EAGAIN;
  799. }
  800. mutex_lock(&intel_pstate_limits_lock);
  801. global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
  802. mutex_unlock(&intel_pstate_limits_lock);
  803. intel_pstate_update_policies();
  804. mutex_unlock(&intel_pstate_driver_lock);
  805. return count;
  806. }
  807. static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
  808. const char *buf, size_t count)
  809. {
  810. unsigned int input;
  811. int ret;
  812. ret = sscanf(buf, "%u", &input);
  813. if (ret != 1)
  814. return -EINVAL;
  815. mutex_lock(&intel_pstate_driver_lock);
  816. if (!intel_pstate_driver) {
  817. mutex_unlock(&intel_pstate_driver_lock);
  818. return -EAGAIN;
  819. }
  820. mutex_lock(&intel_pstate_limits_lock);
  821. global.min_perf_pct = clamp_t(int, input,
  822. min_perf_pct_min(), global.max_perf_pct);
  823. mutex_unlock(&intel_pstate_limits_lock);
  824. intel_pstate_update_policies();
  825. mutex_unlock(&intel_pstate_driver_lock);
  826. return count;
  827. }
  828. show_one(max_perf_pct, max_perf_pct);
  829. show_one(min_perf_pct, min_perf_pct);
  830. define_one_global_rw(status);
  831. define_one_global_rw(no_turbo);
  832. define_one_global_rw(max_perf_pct);
  833. define_one_global_rw(min_perf_pct);
  834. define_one_global_ro(turbo_pct);
  835. define_one_global_ro(num_pstates);
  836. static struct attribute *intel_pstate_attributes[] = {
  837. &status.attr,
  838. &no_turbo.attr,
  839. &turbo_pct.attr,
  840. &num_pstates.attr,
  841. NULL
  842. };
  843. static const struct attribute_group intel_pstate_attr_group = {
  844. .attrs = intel_pstate_attributes,
  845. };
  846. static void __init intel_pstate_sysfs_expose_params(void)
  847. {
  848. struct kobject *intel_pstate_kobject;
  849. int rc;
  850. intel_pstate_kobject = kobject_create_and_add("intel_pstate",
  851. &cpu_subsys.dev_root->kobj);
  852. if (WARN_ON(!intel_pstate_kobject))
  853. return;
  854. rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
  855. if (WARN_ON(rc))
  856. return;
  857. /*
  858. * If per cpu limits are enforced there are no global limits, so
  859. * return without creating max/min_perf_pct attributes
  860. */
  861. if (per_cpu_limits)
  862. return;
  863. rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
  864. WARN_ON(rc);
  865. rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
  866. WARN_ON(rc);
  867. }
  868. /************************** sysfs end ************************/
  869. static void intel_pstate_hwp_enable(struct cpudata *cpudata)
  870. {
  871. /* First, disable HWP notification interrupts, as we don't process them */
  872. if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
  873. wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
  874. wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
  875. cpudata->epp_policy = 0;
  876. if (cpudata->epp_default == -EINVAL)
  877. cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
  878. }
  879. #define MSR_IA32_POWER_CTL_BIT_EE 19
  880. /* Disable energy efficiency optimization */
  881. static void intel_pstate_disable_ee(int cpu)
  882. {
  883. u64 power_ctl;
  884. int ret;
  885. ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
  886. if (ret)
  887. return;
  888. if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
  889. pr_info("Disabling energy efficiency optimization\n");
  890. power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
  891. wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
  892. }
  893. }
  894. static int atom_get_min_pstate(void)
  895. {
  896. u64 value;
  897. rdmsrl(MSR_ATOM_CORE_RATIOS, value);
  898. return (value >> 8) & 0x7F;
  899. }
  900. static int atom_get_max_pstate(void)
  901. {
  902. u64 value;
  903. rdmsrl(MSR_ATOM_CORE_RATIOS, value);
  904. return (value >> 16) & 0x7F;
  905. }
  906. static int atom_get_turbo_pstate(void)
  907. {
  908. u64 value;
  909. rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
  910. return value & 0x7F;
  911. }
  912. static u64 atom_get_val(struct cpudata *cpudata, int pstate)
  913. {
  914. u64 val;
  915. int32_t vid_fp;
  916. u32 vid;
  917. val = (u64)pstate << 8;
  918. if (global.no_turbo && !global.turbo_disabled)
  919. val |= (u64)1 << 32;
  920. vid_fp = cpudata->vid.min + mul_fp(
  921. int_tofp(pstate - cpudata->pstate.min_pstate),
  922. cpudata->vid.ratio);
  923. vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
  924. vid = ceiling_fp(vid_fp);
  925. if (pstate > cpudata->pstate.max_pstate)
  926. vid = cpudata->vid.turbo;
  927. return val | vid;
  928. }
  929. static int silvermont_get_scaling(void)
  930. {
  931. u64 value;
  932. int i;
  933. /* Defined in Table 35-6 from SDM (Sept 2015) */
  934. static int silvermont_freq_table[] = {
  935. 83300, 100000, 133300, 116700, 80000};
  936. rdmsrl(MSR_FSB_FREQ, value);
  937. i = value & 0x7;
  938. WARN_ON(i > 4);
  939. return silvermont_freq_table[i];
  940. }
  941. static int airmont_get_scaling(void)
  942. {
  943. u64 value;
  944. int i;
  945. /* Defined in Table 35-10 from SDM (Sept 2015) */
  946. static int airmont_freq_table[] = {
  947. 83300, 100000, 133300, 116700, 80000,
  948. 93300, 90000, 88900, 87500};
  949. rdmsrl(MSR_FSB_FREQ, value);
  950. i = value & 0xF;
  951. WARN_ON(i > 8);
  952. return airmont_freq_table[i];
  953. }
  954. static void atom_get_vid(struct cpudata *cpudata)
  955. {
  956. u64 value;
  957. rdmsrl(MSR_ATOM_CORE_VIDS, value);
  958. cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
  959. cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
  960. cpudata->vid.ratio = div_fp(
  961. cpudata->vid.max - cpudata->vid.min,
  962. int_tofp(cpudata->pstate.max_pstate -
  963. cpudata->pstate.min_pstate));
  964. rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
  965. cpudata->vid.turbo = value & 0x7f;
  966. }
  967. static int core_get_min_pstate(void)
  968. {
  969. u64 value;
  970. rdmsrl(MSR_PLATFORM_INFO, value);
  971. return (value >> 40) & 0xFF;
  972. }
  973. static int core_get_max_pstate_physical(void)
  974. {
  975. u64 value;
  976. rdmsrl(MSR_PLATFORM_INFO, value);
  977. return (value >> 8) & 0xFF;
  978. }
  979. static int core_get_tdp_ratio(u64 plat_info)
  980. {
  981. /* Check how many TDP levels are present */
  982. if (plat_info & 0x600000000) {
  983. u64 tdp_ctrl;
  984. u64 tdp_ratio;
  985. int tdp_msr;
  986. int err;
  987. /* Get the TDP level (0, 1, 2) to get ratios */
  988. err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
  989. if (err)
  990. return err;
  991. /* The TDP MSRs are contiguous, starting at 0x648 */
  992. tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
  993. err = rdmsrl_safe(tdp_msr, &tdp_ratio);
  994. if (err)
  995. return err;
  996. /* For level 1 and 2, bits[23:16] contain the ratio */
  997. if (tdp_ctrl & 0x03)
  998. tdp_ratio >>= 16;
  999. tdp_ratio &= 0xff; /* ratios are only 8 bits long */
  1000. pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
  1001. return (int)tdp_ratio;
  1002. }
  1003. return -ENXIO;
  1004. }
  1005. static int core_get_max_pstate(void)
  1006. {
  1007. u64 tar;
  1008. u64 plat_info;
  1009. int max_pstate;
  1010. int tdp_ratio;
  1011. int err;
  1012. rdmsrl(MSR_PLATFORM_INFO, plat_info);
  1013. max_pstate = (plat_info >> 8) & 0xFF;
  1014. tdp_ratio = core_get_tdp_ratio(plat_info);
  1015. if (tdp_ratio <= 0)
  1016. return max_pstate;
  1017. if (hwp_active) {
  1018. /* Turbo activation ratio is not used on HWP platforms */
  1019. return tdp_ratio;
  1020. }
  1021. err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
  1022. if (!err) {
  1023. int tar_levels;
  1024. /* Do some sanity checking for safety */
  1025. tar_levels = tar & 0xff;
  1026. if (tdp_ratio - 1 == tar_levels) {
  1027. max_pstate = tar_levels;
  1028. pr_debug("max_pstate=TAC %x\n", max_pstate);
  1029. }
  1030. }
  1031. return max_pstate;
  1032. }
  1033. static int core_get_turbo_pstate(void)
  1034. {
  1035. u64 value;
  1036. int nont, ret;
  1037. rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
  1038. nont = core_get_max_pstate();
  1039. ret = (value) & 255;
  1040. if (ret <= nont)
  1041. ret = nont;
  1042. return ret;
  1043. }
  1044. static inline int core_get_scaling(void)
  1045. {
  1046. return 100000;
  1047. }
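/*
 * The 100000 here is kHz per unit of P-state ratio: these cores run at
 * ratio * 100 MHz, so, for example, a ratio of 24 corresponds to
 * 24 * 100000 kHz = 2.4 GHz.
 */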
  1048. static u64 core_get_val(struct cpudata *cpudata, int pstate)
  1049. {
  1050. u64 val;
  1051. val = (u64)pstate << 8;
  1052. if (global.no_turbo && !global.turbo_disabled)
  1053. val |= (u64)1 << 32;
  1054. return val;
  1055. }
  1056. static int knl_get_aperf_mperf_shift(void)
  1057. {
  1058. return 10;
  1059. }
  1060. static int knl_get_turbo_pstate(void)
  1061. {
  1062. u64 value;
  1063. int nont, ret;
  1064. rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
  1065. nont = core_get_max_pstate();
  1066. ret = (((value) >> 8) & 0xFF);
  1067. if (ret <= nont)
  1068. ret = nont;
  1069. return ret;
  1070. }
  1071. static int intel_pstate_get_base_pstate(struct cpudata *cpu)
  1072. {
  1073. return global.no_turbo || global.turbo_disabled ?
  1074. cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
  1075. }
  1076. static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
  1077. {
  1078. trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
  1079. cpu->pstate.current_pstate = pstate;
  1080. /*
  1081. * Generally, there is no guarantee that this code will always run on
  1082. * the CPU being updated, so force the register update to run on the
  1083. * right CPU.
  1084. */
  1085. wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
  1086. pstate_funcs.get_val(cpu, pstate));
  1087. }
  1088. static void intel_pstate_set_min_pstate(struct cpudata *cpu)
  1089. {
  1090. intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
  1091. }
  1092. static void intel_pstate_max_within_limits(struct cpudata *cpu)
  1093. {
  1094. int pstate;
  1095. update_turbo_state();
  1096. pstate = intel_pstate_get_base_pstate(cpu);
  1097. pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
  1098. intel_pstate_set_pstate(cpu, pstate);
  1099. }
  1100. static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
  1101. {
  1102. cpu->pstate.min_pstate = pstate_funcs.get_min();
  1103. cpu->pstate.max_pstate = pstate_funcs.get_max();
  1104. cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
  1105. cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
  1106. cpu->pstate.scaling = pstate_funcs.get_scaling();
  1107. cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
  1108. cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
  1109. if (pstate_funcs.get_aperf_mperf_shift)
  1110. cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
  1111. if (pstate_funcs.get_vid)
  1112. pstate_funcs.get_vid(cpu);
  1113. intel_pstate_set_min_pstate(cpu);
  1114. }
  1115. static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
  1116. {
  1117. struct sample *sample = &cpu->sample;
  1118. sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
  1119. }
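/*
 * Worked example (hypothetical counter deltas): with aperf = 18000 and
 * mperf = 24000 over a sample period, core_avg_perf =
 * div_ext_fp(18000, 24000) = 12288, i.e. 0.75 with 14 fraction bits.
 * get_avg_frequency() below then scales this by cpu_khz, so for a
 * 2.4 GHz CPU (cpu_khz = 2400000) the reported average is
 * mul_ext_fp(12288, 2400000) = 1800000 kHz, i.e. 1.8 GHz.
 */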
  1120. static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
  1121. {
  1122. u64 aperf, mperf;
  1123. unsigned long flags;
  1124. u64 tsc;
  1125. local_irq_save(flags);
  1126. rdmsrl(MSR_IA32_APERF, aperf);
  1127. rdmsrl(MSR_IA32_MPERF, mperf);
  1128. tsc = rdtsc();
  1129. if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
  1130. local_irq_restore(flags);
  1131. return false;
  1132. }
  1133. local_irq_restore(flags);
  1134. cpu->last_sample_time = cpu->sample.time;
  1135. cpu->sample.time = time;
  1136. cpu->sample.aperf = aperf;
  1137. cpu->sample.mperf = mperf;
  1138. cpu->sample.tsc = tsc;
  1139. cpu->sample.aperf -= cpu->prev_aperf;
  1140. cpu->sample.mperf -= cpu->prev_mperf;
  1141. cpu->sample.tsc -= cpu->prev_tsc;
  1142. cpu->prev_aperf = aperf;
  1143. cpu->prev_mperf = mperf;
  1144. cpu->prev_tsc = tsc;
  1145. /*
  1146. * The first time this function is invoked in a given cycle, all of
  1147. * the previous sample data fields are zero or stale, and they must
  1148. * be populated with meaningful numbers for things to work. So assume
  1149. * that sample.time will always be reset before the utilization
  1150. * update hook is set, and make the caller skip the sample in that
  1151. * case.
  1152. if (cpu->last_sample_time) {
  1153. intel_pstate_calc_avg_perf(cpu);
  1154. return true;
  1155. }
  1156. return false;
  1157. }
  1158. static inline int32_t get_avg_frequency(struct cpudata *cpu)
  1159. {
  1160. return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
  1161. }
  1162. static inline int32_t get_avg_pstate(struct cpudata *cpu)
  1163. {
  1164. return mul_ext_fp(cpu->pstate.max_pstate_physical,
  1165. cpu->sample.core_avg_perf);
  1166. }
  1167. static inline int32_t get_target_pstate(struct cpudata *cpu)
  1168. {
  1169. struct sample *sample = &cpu->sample;
  1170. int32_t busy_frac, boost;
  1171. int target, avg_pstate;
  1172. busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
  1173. sample->tsc);
  1174. boost = cpu->iowait_boost;
  1175. cpu->iowait_boost >>= 1;
  1176. if (busy_frac < boost)
  1177. busy_frac = boost;
  1178. sample->busy_scaled = busy_frac * 100;
  1179. target = global.no_turbo || global.turbo_disabled ?
  1180. cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
  1181. target += target >> 2;
  1182. target = mul_fp(target, busy_frac);
  1183. if (target < cpu->pstate.min_pstate)
  1184. target = cpu->pstate.min_pstate;
  1185. /*
  1186. * If the average P-state during the previous cycle was higher than the
  1187. * current target, add 50% of the difference to the target to reduce
  1188. * possible performance oscillations and offset possible performance
  1189. * loss related to moving the workload from one CPU to another within
  1190. * a package/module.
  1191. */
  1192. avg_pstate = get_avg_pstate(cpu);
  1193. if (avg_pstate > target)
  1194. target += (avg_pstate - target) >> 1;
  1195. return target;
  1196. }
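/*
 * Worked example (hypothetical conditions): with turbo available,
 * turbo_pstate = 32 and busy_frac = 0.75 (Q24.8 value 192, no iowait
 * boost), target = 32 + (32 >> 2) = 40 after the 25% headroom bump,
 * then mul_fp(40, 192) = 30, so a CPU that was 75% busy is asked for
 * P-state 30 of 32. The avg_pstate term can then only pull the result
 * upward.
 */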
  1197. static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
  1198. {
  1199. int max_pstate = intel_pstate_get_base_pstate(cpu);
  1200. int min_pstate;
  1201. min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
  1202. max_pstate = max(min_pstate, cpu->max_perf_ratio);
  1203. return clamp_t(int, pstate, min_pstate, max_pstate);
  1204. }
  1205. static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
  1206. {
  1207. if (pstate == cpu->pstate.current_pstate)
  1208. return;
  1209. cpu->pstate.current_pstate = pstate;
  1210. wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
  1211. }
  1212. static void intel_pstate_adjust_pstate(struct cpudata *cpu)
  1213. {
  1214. int from = cpu->pstate.current_pstate;
  1215. struct sample *sample;
  1216. int target_pstate;
  1217. update_turbo_state();
  1218. target_pstate = get_target_pstate(cpu);
  1219. target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
  1220. trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
  1221. intel_pstate_update_pstate(cpu, target_pstate);
  1222. sample = &cpu->sample;
  1223. trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
  1224. fp_toint(sample->busy_scaled),
  1225. from,
  1226. cpu->pstate.current_pstate,
  1227. sample->mperf,
  1228. sample->aperf,
  1229. sample->tsc,
  1230. get_avg_frequency(cpu),
  1231. fp_toint(cpu->iowait_boost * 100));
  1232. }
  1233. static void intel_pstate_update_util(struct update_util_data *data, u64 time,
  1234. unsigned int flags)
  1235. {
  1236. struct cpudata *cpu = container_of(data, struct cpudata, update_util);
  1237. u64 delta_ns;
  1238. /* Don't allow remote callbacks */
  1239. if (smp_processor_id() != cpu->cpu)
  1240. return;
  1241. if (flags & SCHED_CPUFREQ_IOWAIT) {
  1242. cpu->iowait_boost = int_tofp(1);
  1243. cpu->last_update = time;
  1244. /*
  1245. * Busy was 100% at the last sample, so the P-state was at max
  1246. * anyway; avoid the overhead of recomputing it.
  1247. */
  1248. if (fp_toint(cpu->sample.busy_scaled) == 100)
  1249. return;
  1250. goto set_pstate;
  1251. } else if (cpu->iowait_boost) {
  1252. /* Clear iowait_boost if the CPU may have been idle. */
  1253. delta_ns = time - cpu->last_update;
  1254. if (delta_ns > TICK_NSEC)
  1255. cpu->iowait_boost = 0;
  1256. }
  1257. cpu->last_update = time;
  1258. delta_ns = time - cpu->sample.time;
  1259. if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
  1260. return;
  1261. set_pstate:
  1262. if (intel_pstate_sample(cpu, time))
  1263. intel_pstate_adjust_pstate(cpu);
  1264. }
  1265. static struct pstate_funcs core_funcs = {
  1266. .get_max = core_get_max_pstate,
  1267. .get_max_physical = core_get_max_pstate_physical,
  1268. .get_min = core_get_min_pstate,
  1269. .get_turbo = core_get_turbo_pstate,
  1270. .get_scaling = core_get_scaling,
  1271. .get_val = core_get_val,
  1272. };
  1273. static const struct pstate_funcs silvermont_funcs = {
  1274. .get_max = atom_get_max_pstate,
  1275. .get_max_physical = atom_get_max_pstate,
  1276. .get_min = atom_get_min_pstate,
  1277. .get_turbo = atom_get_turbo_pstate,
  1278. .get_val = atom_get_val,
  1279. .get_scaling = silvermont_get_scaling,
  1280. .get_vid = atom_get_vid,
  1281. };
  1282. static const struct pstate_funcs airmont_funcs = {
  1283. .get_max = atom_get_max_pstate,
  1284. .get_max_physical = atom_get_max_pstate,
  1285. .get_min = atom_get_min_pstate,
  1286. .get_turbo = atom_get_turbo_pstate,
  1287. .get_val = atom_get_val,
  1288. .get_scaling = airmont_get_scaling,
  1289. .get_vid = atom_get_vid,
  1290. };
  1291. static const struct pstate_funcs knl_funcs = {
  1292. .get_max = core_get_max_pstate,
  1293. .get_max_physical = core_get_max_pstate_physical,
  1294. .get_min = core_get_min_pstate,
  1295. .get_turbo = knl_get_turbo_pstate,
  1296. .get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
  1297. .get_scaling = core_get_scaling,
  1298. .get_val = core_get_val,
  1299. };
  1300. static const struct pstate_funcs bxt_funcs = {
  1301. .get_max = core_get_max_pstate,
  1302. .get_max_physical = core_get_max_pstate_physical,
  1303. .get_min = core_get_min_pstate,
  1304. .get_turbo = core_get_turbo_pstate,
  1305. .get_scaling = core_get_scaling,
  1306. .get_val = core_get_val,
  1307. };

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
	  (unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs),
	ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs),
	ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs),
	ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs),
	ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs),
	ICPU(INTEL_FAM6_HASWELL_X, core_funcs),
	ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs),
	ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs),
	ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs),
	ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs),
	ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
	ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
	ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs),
	ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
	ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
	ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
	{}
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs),
	{}
};
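
/*
 * Allocate and initialize the per-CPU data for @cpunum the first time it
 * is seen; the allocation then stays around until the driver is
 * unregistered. With HWP active, this also disables the energy-efficiency
 * optimization on models listed in intel_pstate_cpu_ee_disable_ids[] and
 * enables HWP on the CPU.
 */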
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];
	if (!cpu) {
		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;

		cpu->epp_default = -EINVAL;
		cpu->epp_powersave = -EINVAL;
		cpu->epp_saved = -EINVAL;
	}

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		const struct x86_cpu_id *id;

		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
		if (id)
			intel_pstate_disable_ee(cpunum);

		intel_pstate_hwp_enable(cpu);
	}

	intel_pstate_get_cpu_pstates(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}
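
/*
 * Register the scheduler utilization-update callback for @cpu_num so that
 * P-state selection runs from scheduler context. A no-op when HWP is
 * active (the hardware picks P-states itself) or when the hook is already
 * installed.
 */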
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (hwp_active)
		return;

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}
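
/*
 * Return the maximum frequency the policy may request: the non-turbo
 * maximum when turbo is disabled or unavailable, the turbo maximum
 * otherwise.
 */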
static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
	return global.turbo_disabled || global.no_turbo ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
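
/*
 * Map the policy's min/max frequencies onto min/max performance ratios for
 * @cpu, applying either the per-CPU limits or the global min_perf_pct /
 * max_perf_pct limits set via sysfs.
 */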
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct cpudata *cpu)
{
	int max_freq = intel_pstate_get_max_freq(cpu);
	int32_t max_policy_perf, min_policy_perf;
	int max_state, turbo_max;

	/*
	 * HWP needs some special consideration, because on BDX the
	 * HWP_REQUEST uses an abstract value to represent performance
	 * rather than pure ratios.
	 */
	if (hwp_active) {
		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
	} else {
		max_state = intel_pstate_get_base_pstate(cpu);
		turbo_max = cpu->pstate.turbo_pstate;
	}

	max_policy_perf = max_state * policy->max / max_freq;
	if (policy->max == policy->min) {
		min_policy_perf = max_policy_perf;
	} else {
		min_policy_perf = max_state * policy->min / max_freq;
		min_policy_perf = clamp_t(int32_t, min_policy_perf,
					  0, max_policy_perf);
	}

	pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
		 policy->cpu, max_state,
		 min_policy_perf, max_policy_perf);

	/* Normalize user input to [min_perf, max_perf] */
	if (per_cpu_limits) {
		cpu->min_perf_ratio = min_policy_perf;
		cpu->max_perf_ratio = max_policy_perf;
	} else {
		int32_t global_min, global_max;

		/* Global limits are in percent of the maximum turbo P-state. */
		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
		global_min = clamp_t(int32_t, global_min, 0, global_max);

		pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
			 global_min, global_max);

		cpu->min_perf_ratio = max(min_policy_perf, global_min);
		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
		cpu->max_perf_ratio = min(max_policy_perf, global_max);
		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);

		/* Make sure min_perf <= max_perf */
		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
					  cpu->max_perf_ratio);
	}

	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
		 cpu->max_perf_ratio,
		 cpu->min_perf_ratio);
}
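
/*
 * cpufreq ->setpolicy callback: recompute the performance limits for the
 * policy and either pin the CPU to the highest P-state within those limits
 * (performance policy) or (re)register the utilization-update hook
 * (powersave policy).
 */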
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	mutex_lock(&intel_pstate_limits_lock);

	intel_pstate_update_perf_limits(policy, cpu);

	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	} else {
		intel_pstate_set_update_util_hook(policy->cpu);
	}

	if (hwp_active)
		intel_pstate_hwp_set(policy->cpu);

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}
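
/*
 * If the CPU can run above its reported maximum non-turbo P-state and the
 * requested maximum lies between the non-turbo and turbo frequencies, lift
 * policy->max to cpuinfo.max_freq so that turbo frequencies stay reachable.
 */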
static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
					   struct cpudata *cpu)
{
	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_freq) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	update_turbo_state();
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     intel_pstate_get_max_freq(cpu));

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	intel_pstate_adjust_policy_max(policy, cpu);

	return 0;
}

static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	intel_pstate_clear_update_util_hook(policy->cpu);
	if (hwp_active)
		intel_pstate_hwp_save_state(policy);
	else
		intel_cpufreq_stop_cpu(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	policy->fast_switch_possible = false;

	return 0;
}

static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	cpu->max_perf_ratio = 0xFF;
	cpu->min_perf_ratio = 0;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = global.turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.suspend	= intel_pstate_hwp_save_state,
	.resume		= intel_pstate_resume,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];

	update_turbo_state();
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     intel_pstate_get_max_freq(cpu));

	intel_pstate_adjust_policy_max(policy, cpu);

	intel_pstate_update_perf_limits(policy, cpu);

	return 0;
}
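
/*
 * Passive mode ->target callback: pick a P-state for the requested
 * frequency according to @relation (round up for CPUFREQ_RELATION_L, down
 * for CPUFREQ_RELATION_H, to the nearest otherwise) and program it through
 * the PERF_CTL MSR if it differs from the current one.
 */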
static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	update_turbo_state();

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (target_pstate != cpu->pstate.current_pstate) {
		cpu->pstate.current_pstate = target_pstate;
		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
	}
	freqs.new = target_pstate * cpu->pstate.scaling;
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}
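
/*
 * Fast-switch variant of intel_cpufreq_target(): no transition
 * notifications, just clamp the request and write the new P-state,
 * returning the frequency that was actually set.
 */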
static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	update_turbo_state();

	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	intel_pstate_update_pstate(cpu, target_pstate);
	return target_pstate * cpu->pstate.scaling;
}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	return 0;
}

static struct cpufreq_driver intel_cpufreq = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_cpufreq_verify_policy,
	.target		= intel_cpufreq_target,
	.fast_switch	= intel_cpufreq_fast_switch,
	.init		= intel_cpufreq_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_cpufreq_stop_cpu,
	.name		= "intel_cpufreq",
};

static struct cpufreq_driver *default_driver = &intel_pstate;
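
/*
 * Undo all per-CPU setup: remove the utilization-update hooks in active
 * mode, free the per-CPU data and mark the driver as unregistered.
 */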
static void intel_pstate_driver_cleanup(void)
{
	unsigned int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			kfree(all_cpu_data[cpu]);
			all_cpu_data[cpu] = NULL;
		}
	}
	put_online_cpus();
	intel_pstate_driver = NULL;
}

static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
	int ret;

	memset(&global, 0, sizeof(global));
	global.max_perf_pct = 100;

	intel_pstate_driver = driver;
	ret = cpufreq_register_driver(intel_pstate_driver);
	if (ret) {
		intel_pstate_driver_cleanup();
		return ret;
	}

	global.min_perf_pct = min_perf_pct_min();

	return 0;
}

static int intel_pstate_unregister_driver(void)
{
	if (hwp_active)
		return -EBUSY;

	cpufreq_unregister_driver(intel_pstate_driver);
	intel_pstate_driver_cleanup();

	return 0;
}

static ssize_t intel_pstate_show_status(char *buf)
{
	if (!intel_pstate_driver)
		return sprintf(buf, "off\n");

	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
					"active" : "passive");
}
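
/*
 * Handle writes to the "status" sysfs attribute: "off" unregisters the
 * current driver, while "active" and "passive" switch between the
 * intel_pstate and intel_cpufreq drivers, re-registering as needed.
 */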
static int intel_pstate_update_status(const char *buf, size_t size)
{
	int ret;

	if (size == 3 && !strncmp(buf, "off", size))
		return intel_pstate_driver ?
			intel_pstate_unregister_driver() : -EINVAL;

	if (size == 6 && !strncmp(buf, "active", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_pstate)
				return 0;

			ret = intel_pstate_unregister_driver();
			if (ret)
				return ret;
		}

		return intel_pstate_register_driver(&intel_pstate);
	}

	if (size == 7 && !strncmp(buf, "passive", size)) {
		if (intel_pstate_driver) {
			if (intel_pstate_driver == &intel_cpufreq)
				return 0;

			ret = intel_pstate_unregister_driver();
			if (ret)
				return ret;
		}

		return intel_pstate_register_driver(&intel_cpufreq);
	}

	return -EINVAL;
}

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;
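
/*
 * Return -ENODEV when any of the P-state MSR callbacks reports a zero
 * value, i.e. when the MSRs cannot be trusted on this CPU.
 */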
static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
}

#ifdef CONFIG_ACPI
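
/*
 * Return true if no possible CPU exposes a valid ACPI _PSS package, i.e.
 * the platform firmware does not publish P-state tables of its own.
 */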
static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
	{"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS},
	{"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
	{ } /* End */
};
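
/*
 * Return true if the platform firmware manages P-states itself: either the
 * OOB (out-of-band) bit 8 is set in MSR_MISC_PWR_MGMT on server parts
 * listed in intel_pstate_cpu_oob_ids[], or the platform is in plat_info[]
 * and the corresponding ACPI criterion (_PSS or _PPC) applies.
 */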
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	const struct x86_cpu_id *id;
	u64 misc_pwr;
	int idx;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	idx = acpi_match_platform_list(plat_info);
	if (idx < 0)
		return false;

	switch (plat_info[idx].data) {
	case PSS:
		return intel_pstate_no_acpi_pss();
	case PPC:
		return intel_pstate_has_acpi_ppc() && !force_load;
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int rc;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids)) {
		copy_cpu_funcs(&core_funcs);
		if (!no_hwp) {
			hwp_active++;
			intel_pstate.attr = hwp_cpufreq_attrs;
			goto hwp_cpu_matched;
		}
	} else {
		const struct x86_cpu_id *id;

		id = x86_match_cpu(intel_pstate_cpu_ids);
		if (!id)
			return -ENODEV;

		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
	}

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	if (!hwp_active && hwp_only)
		return -ENOTSUPP;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	intel_pstate_request_control_from_smm();

	intel_pstate_sysfs_expose_params();

	mutex_lock(&intel_pstate_driver_lock);
	rc = intel_pstate_register_driver(default_driver);
	mutex_unlock(&intel_pstate_driver_lock);
	if (rc)
		return rc;

	if (hwp_active)
		pr_info("HWP enabled\n");

	return 0;
}
device_initcall(intel_pstate_init);
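
/*
 * Parse the intel_pstate= kernel command line argument: disable, passive,
 * no_hwp, force, hwp_only, per_cpu_perf_limits and, with CONFIG_ACPI,
 * support_acpi_ppc.
 */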
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable")) {
		no_load = 1;
	} else if (!strcmp(str, "passive")) {
		pr_info("Passive mode enabled\n");
		default_driver = &intel_cpufreq;
		no_hwp = 1;
	}
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P-state driver for Intel Core processors");
MODULE_LICENSE("GPL");