amd_powerplay.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"

#define PP_DPM_DISABLED 0xCCCC

static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state);

static const struct amd_pm_funcs pp_dpm_funcs;
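
/*
 * Validate a powerplay handle before use: returns -EINVAL when the handle
 * or its SMU-manager callbacks are missing, PP_DPM_DISABLED when power
 * management is turned off (pm_en == 0 or no hwmgr callbacks), and 0 when
 * DPM is fully usable.
 */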
static inline int pp_check(struct pp_hwmgr *hwmgr)
{
	if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
		return -EINVAL;

	if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
		return PP_DPM_DISABLED;

	return 0;
}
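
/*
 * Allocate the hwmgr instance and wire it into the amdgpu device as the
 * powerplay handle. DPM is enabled only when the amdgpu_dpm module
 * parameter is set and the device is not an SR-IOV virtual function.
 */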
static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = amdgpu_pp_feature_mask;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}

static int amd_powerplay_destroy(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;

	kfree(hwmgr);
	hwmgr = NULL;
	return 0;
}

static int pp_early_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = handle;

	ret = amd_powerplay_create(adev);
	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}

static int pp_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret >= 0) {
		if (hwmgr->smumgr_funcs->smu_init == NULL)
			return -EINVAL;

		ret = hwmgr->smumgr_funcs->smu_init(hwmgr);

		phm_register_irq_handlers(hwmgr);

		pr_debug("amdgpu: powerplay sw initialized\n");
	}

	return ret;
}

static int pp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret >= 0) {
		if (hwmgr->smumgr_funcs->smu_fini != NULL)
			hwmgr->smumgr_funcs->smu_fini(hwmgr);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}
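
/*
 * Hardware init: start the SMC firmware, then bring up the hwmgr. When
 * pp_check() reports PP_DPM_DISABLED the SMU is still started, but DPM
 * setup is skipped, pm_en is cleared, and cgs_notify_dpm_enabled()
 * reports DPM as disabled.
 */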
static int pp_hw_init(void *handle)
{
	int ret = 0;
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);

	ret = pp_check(hwmgr);

	if (ret >= 0) {
		if (hwmgr->smumgr_funcs->start_smu == NULL)
			return -EINVAL;

		if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
			pr_err("smc start failed\n");
			hwmgr->smumgr_funcs->smu_fini(hwmgr);
			return -EINVAL;
		}
		if (ret == PP_DPM_DISABLED)
			goto exit;
		ret = hwmgr_hw_init(hwmgr);
		if (ret)
			goto exit;
	}
	return ret;
exit:
	hwmgr->pm_en = 0;
	cgs_notify_dpm_enabled(hwmgr->device, false);
	return 0;
}

static int pp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret == 0)
		hwmgr_hw_fini(hwmgr);

	return 0;
}

static int pp_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret == 0)
		pp_dpm_dispatch_tasks(hwmgr,
				AMD_PP_TASK_COMPLETE_INIT, NULL);

	return 0;
}

static void pp_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amd_powerplay_destroy(adev);
}

static bool pp_is_idle(void *handle)
{
	return false;
}

static int pp_wait_for_idle(void *handle)
{
	return 0;
}

static int pp_sw_reset(void *handle)
{
	return 0;
}

static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	/* Enable/disable GFX per cu powergating through SMU */
	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
			state == AMD_PG_STATE_GATE);
}

static int pp_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret == 0)
		hwmgr_hw_suspend(hwmgr);
	return 0;
}
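
/*
 * Resume mirrors pp_hw_init: restart the SMC first, then resume the
 * hwmgr state. When DPM is disabled (pp_check() returned PP_DPM_DISABLED)
 * only the SMU restart is needed.
 */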
static int pp_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret;

	ret = pp_check(hwmgr);

	if (ret < 0)
		return ret;

	if (hwmgr->smumgr_funcs->start_smu == NULL)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("smc start failed\n");
		hwmgr->smumgr_funcs->smu_fini(hwmgr);
		return -EINVAL;
	}

	if (ret == PP_DPM_DISABLED)
		return 0;

	return hwmgr_hw_resume(hwmgr);
}

static int pp_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

static int pp_dpm_load_fw(void *handle)
{
	return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
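
/*
 * Entering a UMD stable-pstate profile level saves the current DPM level
 * and ungates GFX clock/power gating so the profiled clocks stay fixed;
 * leaving it restores the saved level and re-enables gating.
 */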
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			cgs_set_clockgating_state(hwmgr->device,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			cgs_set_clockgating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			cgs_set_powergating_state(hwmgr->device,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}

static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (level == hwmgr->dpm_level)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	enum amd_dpm_forced_level level;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	level = hwmgr->dpm_level;
	mutex_unlock(&hwmgr->smu_lock);
	return level;
}

static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	uint32_t clk = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_sclk == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return clk;
}

static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	uint32_t clk = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_mclk == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return clk;
}

static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}
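
/*
 * Forward a powerplay task (e.g. display configuration change, readjust
 * power state) to the hwmgr task handler under the smu_lock.
 */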
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;
	struct pp_hwmgr *hwmgr = handle;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}
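
/* Map the current power state's UI label onto the generic amd_pm_state_type. */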
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	int ret = 0;
	enum amd_pm_state_type pm_type;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->current_ps == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}
	mutex_unlock(&hwmgr->smu_lock);

	return pm_type;
}

static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
	mutex_unlock(&hwmgr->smu_lock);
}

static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	uint32_t mode = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return mode;
}

static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
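
/*
 * Fill a pp_states_info from the hwmgr's flat power-state array; entries
 * are ps_size bytes apart, so indexing is done with byte arithmetic.
 */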
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;
	int ret = 0;

	memset(data, 0, sizeof(*data));

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->ps == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;
	int size = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (!hwmgr->soft_pp_table)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	*table = (char *)hwmgr->soft_pp_table;
	size = hwmgr->soft_pp_table_size;
	mutex_unlock(&hwmgr->smu_lock);
	return size;
}
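
/* Full reset: tear down and re-init the hwmgr, then rerun the post-init task. */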
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}
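
/*
 * Install a caller-supplied soft pp table. The first write duplicates the
 * original table into hardcode_pp_table so later writes patch a private
 * copy; the powerplay stack is then reset to pick up the new table.
 */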
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table) {
			mutex_unlock(&hwmgr->smu_lock);
			return -ENOMEM;
		}
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
	mutex_unlock(&hwmgr->smu_lock);

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			return ret;
	}

	return 0;
}

static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
	else
		ret = -EINVAL;
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_sclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_mclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
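
/*
 * Stable-pstate sclk/mclk reads are answered from values cached in the
 * hwmgr; all other sensors go to the asic-specific read_sensor callback
 * under the smu_lock.
 */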
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (value == NULL)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
		return 0;
	default:
		mutex_lock(&hwmgr->smu_lock);
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
		mutex_unlock(&hwmgr->smu_lock);
		return ret;
	}
}

static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return NULL;

	if (hwmgr && idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}

static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!buf || pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -EINVAL;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
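
/*
 * Track active workloads as a priority bitmask: fls() picks the
 * highest-priority bit still set, and the matching workload_setting is
 * pushed to the SMU unless the level is under manual control.
 */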
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (pp_check(hwmgr))
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

static int pp_dpm_notify_smu_memory_info(void *handle,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);

	ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
					virtual_addr_hi, mc_addr_low,
					mc_addr_hi, size);

	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}
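
/* A limit of 0 selects the default; anything above the default is rejected. */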
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	if (limit > hwmgr->default_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (limit == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (default_limit)
		*limit = hwmgr->default_power_limit;
	else
		*limit = hwmgr->power_limit;

	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static int pp_display_configuration_change(void *handle,
	const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);
	phm_store_dal_configuration_data(hwmgr, display_config);
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_get_display_power_level(void *handle,
		struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (output == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_dal_power_level(hwmgr, output);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
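
/*
 * Gather the current engine/memory clock window from the hwmgr, using the
 * power-containment limits when that platform cap is enabled, and overlay
 * shallow-sleep clocks when they are available.
 */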
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks;
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	mutex_lock(&hwmgr->smu_lock);

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_info("Error in phm_get_clock_info\n");
		mutex_unlock(&hwmgr->smu_lock);
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (clocks == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_get_clock_by_type_with_latency(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_get_clock_by_type_with_voltage(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (!clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_set_watermarks_for_clocks_ranges(void *handle,
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (!wm_with_clock_ranges)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
			wm_with_clock_ranges);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static int pp_display_clock_voltage_request(void *handle,
		struct pp_display_clock_request *clock)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);
	if (ret)
		return ret;

	if (!clock)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_display_clock_voltage_request(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static int pp_get_display_mode_validation_clocks(void *handle,
		struct amd_pp_simple_clock_info *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (clocks == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
		ret = phm_get_max_high_clocks(hwmgr, clocks);

	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_set_mmhub_powergating_by_smu(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	ret = pp_check(hwmgr);

	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}
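
/*
 * Dispatch table exported through adev->powerplay.pp_funcs; the entries
 * after the marker below are consumed by the display core (DC).
 */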
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.powergate_vce = pp_dpm_powergate_vce,
	.powergate_uvd = pp_dpm_powergate_uvd,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
};