amdgpu_pm.c

/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
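
/*
 * amdgpu_pm_acpi_event_handler - handle an ACPI AC/DC power-source event
 *
 * Caches the new AC/DC status in adev->pm.dpm.ac_power and, where the asic
 * supports BAPM, re-enables it for the new power source. Powerplay parts
 * currently do nothing here (see the TODO below).
 */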

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pp_enabled)
		/* TODO */
		return;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}
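
/*
 * The power_dpm_state sysfs file exposes the user-selected power state:
 * "battery", "balanced" or "performance". The _get handler reports the
 * current selection, the _set handler applies a new one.
 */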

static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}
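
/*
 * power_dpm_force_performance_level pins the asic to a fixed DPM level:
 * "auto" (the default heuristic), "low", "high" or, with powerplay,
 * "manual". Reading it back reports the currently forced level, or "off"
 * while a PX card is powered down.
 */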

static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	if (adev->pp_enabled) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
				(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
				(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_MANUAL;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len;

	/* make sure data.nums is sane if powerplay is disabled */
	memset(&data, 0, sizeof(data));

	if (adev->pp_enabled)
		amdgpu_dpm_get_pp_num_states(adev, &data);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i = 0;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i;

	if (adev->pp_force_state_enabled && adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;

		return snprintf(buf, PAGE_SIZE, "%d\n", i);
	} else
		return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else {
		ret = kstrtol(buf, 0, &idx);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}

		if (adev->pp_enabled) {
			struct pp_states_info data;

			amdgpu_dpm_get_pp_num_states(adev, &data);
			/* reject indices outside the reported state table */
			if (idx < 0 || idx >= data.nums) {
				count = -EINVAL;
				goto fail;
			}
			state = data.states[idx];
			/* only set user selected power states */
			if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
			    state != POWER_STATE_TYPE_DEFAULT) {
				amdgpu_dpm_dispatch_task(adev,
						AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
				adev->pp_force_state_enabled = true;
			}
		}
	}
fail:
	return count;
}

static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size, i;

	if (adev->pp_enabled)
		size = amdgpu_dpm_get_pp_table(adev, &table);
	else
		return 0;

	/* each byte takes two hex digits, plus a trailing newline and NUL */
	if (size > (PAGE_SIZE - 2) / 2)
		size = (PAGE_SIZE - 2) / 2;

	for (i = 0; i < size; i++)
		sprintf(buf + i * 2, "%02x", table[i]);
	sprintf(buf + i * 2, "\n");

	return i * 2 + 1;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_enabled)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	return count;
}
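
/*
 * pp_dpm_sclk / pp_dpm_mclk / pp_dpm_pcie: reading lists the available
 * engine clock, memory clock and PCIe levels; writing a level index
 * forces that level via amdgpu_dpm_force_clock_level().
 */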

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;

	ret = kstrtol(buf, 0, &level);
	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_SCLK, level);
fail:
	return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;

	ret = kstrtol(buf, 0, &level);
	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_MCLK, level);
fail:
	return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;

	ret = kstrtol(buf, 0, &level);
	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_PCIE, level);
fail:
	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_force_state,
		   amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_table,
		   amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_sclk,
		   amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_mclk,
		   amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_pcie,
		   amdgpu_set_pp_dpm_pcie);
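
/*
 * hwmon interface: temp1_* report the gpu temperature and thermal
 * thresholds; pwm1 exposes fan speed on the standard 0-255 hwmon scale
 * and is converted to/from the driver's percent-based fan interface.
 */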

static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
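
/*
 * amdgpu_dpm_thermal_work_handler - worker scheduled from the thermal
 * interrupt; switches to the internal thermal state while the asic runs
 * hot and restores the user state once it has cooled back down.
 */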

void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}
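
/*
 * amdgpu_dpm_pick_power_state - select the best matching power state
 *
 * Walks the power-state table looking for a state of the requested class,
 * preferring single-display-only states when just one crtc is active and
 * the vblank period is long enough to re-clock mclk. If no state of the
 * requested class exists, falls back through progressively more generic
 * classes (see the switch after the search loop).
 */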

static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
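
/*
 * amdgpu_dpm_change_power_state_locked - switch to the requested state
 *
 * Picks the target state, skips the switch when nothing changed (only
 * refreshing watermarks and display config where needed), then reprograms
 * the hardware: update bandwidth, drain the rings, program the new state
 * and reapply any forced performance level. Caller must hold pm.mutex.
 */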

static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
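
/*
 * amdgpu_dpm_enable_uvd / amdgpu_dpm_enable_vce - power the UVD/VCE
 * blocks up or down around their use, either via powergating when the
 * asic supports it or by steering dpm to/from the dedicated UVD/VCE
 * power states.
 */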

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_uvd(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_uvd) {
			mutex_lock(&adev->pm.mutex);
			/* enable/disable UVD */
			amdgpu_dpm_powergate_uvd(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = true;
				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_vce(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_vce) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_powergate_vce(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = true;
				/* XXX select vce level based on ring/task */
				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->pp_enabled)
		/* TODO */
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}
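
/*
 * amdgpu_pm_sysfs_init - register the hwmon device, the dpm sysfs files
 * and (with powerplay) the pp_* files, plus the debugfs entry. Safe to
 * call more than once; does nothing after the first successful run.
 */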

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!adev->pp_enabled) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm force performance level\n");
		return ret;
	}

	if (adev->pp_enabled) {
		ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
		if (ret) {
			DRM_ERROR("failed to create device file pp_num_states\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_cur_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_force_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_table);
		if (ret) {
			DRM_ERROR("failed to create device file pp_table\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_sclk\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_mclk\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_pcie\n");
			return ret;
		}
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (adev->pp_enabled) {
		device_remove_file(adev->dev, &dev_attr_pp_num_states);
		device_remove_file(adev->dev, &dev_attr_pp_cur_state);
		device_remove_file(adev->dev, &dev_attr_pp_force_state);
		device_remove_file(adev->dev, &dev_attr_pp_table);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	}
}
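
/*
 * amdgpu_pm_compute_clocks - re-evaluate clocks for the current load
 *
 * Called after display or power-source changes: refreshes the active
 * crtc mask and AC/DC status, then either dispatches a display-config
 * event to powerplay or re-runs the legacy dpm state selection.
 */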

void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pp_enabled) {
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->pp_enabled) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}