amdgpu_pm.c

/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

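/*
 * Called on an ACPI power-source event (AC plugged or unplugged):
 * cache the new supply state and, where the asic supports it, let the
 * BAPM logic rebalance the power budget between CPU and GPU.
 */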
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pp_enabled)
		/* TODO */
		return;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

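/*
 * power_dpm_state (sysfs, read/write)
 *
 * Coarse power-profile control: one of "battery", "balanced" or
 * "performance".  Reading returns the current (or user-requested)
 * state.  A minimal usage sketch from userspace, assuming the GPU is
 * card0:
 *
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *	echo battery > /sys/class/drm/card0/device/power_dpm_state
 */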
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->pp_enabled)
		pm = amdgpu_dpm_get_current_power_state(adev);
	else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}

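/*
 * power_dpm_force_performance_level (sysfs, read/write)
 *
 * Pins the asic to a fixed performance level instead of letting DPM
 * choose one.  Accepts "auto", "low" or "high"; "manual" is
 * additionally understood when the powerplay path is enabled.
 * For example (card0 assumed):
 *
 *	echo low > /sys/class/drm/card0/device/power_dpm_force_performance_level
 */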
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	if (adev->pp_enabled) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
				(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
				(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_MANUAL;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}

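/*
 * pp_num_states, pp_cur_state and pp_force_state (sysfs)
 *
 * Powerplay-only state handling: pp_num_states lists the indexed power
 * states the asic exposes, pp_cur_state reports the index of the state
 * currently in use, and writing an index to pp_force_state forces that
 * state (writing an empty string releases the override).
 */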
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len;

	/* report zero states rather than stack garbage when powerplay
	 * is not enabled
	 */
	memset(&data, 0, sizeof(data));

	if (adev->pp_enabled)
		amdgpu_dpm_get_pp_num_states(adev, &data);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i = 0;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i;

	if (adev->pp_force_state_enabled && adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;

		return snprintf(buf, PAGE_SIZE, "%d\n", i);
	} else
		return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else {
		ret = kstrtol(buf, 0, &idx);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}

		if (adev->pp_enabled) {
			struct pp_states_info data;

			amdgpu_dpm_get_pp_num_states(adev, &data);
			/* reject out-of-range state indices */
			if (idx < 0 || idx >= data.nums) {
				count = -EINVAL;
				goto fail;
			}
			state = data.states[idx];
			/* only set user selected power states */
			if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
			    state != POWER_STATE_TYPE_DEFAULT) {
				amdgpu_dpm_dispatch_task(adev,
						AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
				adev->pp_force_state_enabled = true;
			}
		}
	}
fail:
	return count;
}

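/*
 * pp_table (sysfs, read/write)
 *
 * Raw access to the powerplay table: reading dumps the current table
 * as hex bytes, writing uploads a replacement table for the driver to
 * use.
 */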
static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size, i;

	if (adev->pp_enabled)
		size = amdgpu_dpm_get_pp_table(adev, &table);
	else
		return 0;

	if (size < 0)
		return size;

	/* two hex digits per byte plus a trailing newline must fit in
	 * one page, and each byte advances the output by two characters
	 */
	if (size > (PAGE_SIZE - 2) / 2)
		size = (PAGE_SIZE - 2) / 2;

	for (i = 0; i < size; i++)
		sprintf(buf + i * 2, "%02x", (u8)table[i]);
	sprintf(buf + i * 2, "\n");

	return i * 2 + 1;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_enabled)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	return count;
}

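/*
 * pp_dpm_sclk, pp_dpm_mclk and pp_dpm_pcie (sysfs, read/write)
 *
 * Reading prints the available engine, memory and PCIe levels.
 * Writing restricts DPM to a subset of levels: the parsers below take
 * one level index per character, so e.g. "012" builds a mask enabling
 * levels 0, 1 and 2 (only single-digit indices can be selected in this
 * version; a trailing newline is ignored).
 */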
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	/* one level index per character; skip the trailing newline so
	 * the last digit is not silently dropped
	 */
	for (i = 0; i < strlen(buf); i++) {
		if (buf[i] == '\n')
			continue;
		sub_str[0] = buf[i];
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
fail:
	return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	/* one level index per character; skip the trailing newline */
	for (i = 0; i < strlen(buf); i++) {
		if (buf[i] == '\n')
			continue;
		sub_str[0] = buf[i];
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
fail:
	return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	/* one level index per character; skip the trailing newline */
	for (i = 0; i < strlen(buf); i++) {
		if (buf[i] == '\n')
			continue;
		sub_str[0] = buf[i];
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
fail:
	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_force_state,
		   amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_table,
		   amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_sclk,
		   amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_mclk,
		   amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_pcie,
		   amdgpu_set_pp_dpm_pcie);

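/*
 * hwmon interface, following the standard hwmon sysfs conventions:
 * temp1_input and the crit thresholds are in millidegrees Celsius,
 * pwm1 takes 0-255 (scaled to a percentage for the dpm backend), and
 * pwm1_enable selects 1 (manual) or 2 (smc/fuse controlled).
 */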
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

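/* hwmon attribute table; hwmon_attributes_visible() below hides
 * entries the asic cannot service
 */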
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

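/*
 * Worker scheduled by the thermal interrupt: enter the internal
 * thermal state while the asic is too hot and drop back to the user
 * state once it has cooled (or on the high-to-low trip when no
 * temperature readback is available).
 */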
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

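/*
 * Pick the best matching registered power state for the requested
 * class: scan all states for one whose UI class (or internal class
 * bits) matches, honouring single-display-only states, then walk a
 * chain of progressively more generic fallbacks if nothing matched.
 */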
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = adev->pm.dpm.new_active_crtc_count < 2;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

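/*
 * Switch to the currently requested power state.  Caller must hold
 * adev->pm.mutex.  Redundant switches are skipped where only the
 * display configuration changed; otherwise display watermarks are
 * updated and the rings drained before the new state is programmed.
 */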
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}

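/*
 * UVD/VCE power management: prefer the asic's native powergating hook
 * when present; otherwise emulate it by requesting the dedicated UVD
 * power state (or flagging VCE activity) and recomputing clocks.
 */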
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_uvd(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_uvd) {
			mutex_lock(&adev->pm.mutex);
			/* enable/disable UVD */
			amdgpu_dpm_powergate_uvd(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = true;
				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_vce(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_vce) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_powergate_vce(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = true;
				/* XXX select vce level based on ring/task */
				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->pp_enabled)
		/* TO DO */
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

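/*
 * Register the hwmon device and the sysfs files above; the powerplay
 * (pp_*) files are only created when the powerplay path is enabled.
 * Calling this more than once is a no-op.
 */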
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!adev->pp_enabled) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm forced performance level\n");
		return ret;
	}

	if (adev->pp_enabled) {
		ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
		if (ret) {
			DRM_ERROR("failed to create device file pp_num_states\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_cur_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_force_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_table);
		if (ret) {
			DRM_ERROR("failed to create device file pp_table\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_sclk\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_mclk\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_pcie\n");
			return ret;
		}
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (adev->pp_enabled) {
		device_remove_file(adev->dev, &dev_attr_pp_num_states);
		device_remove_file(adev->dev, &dev_attr_pp_cur_state);
		device_remove_file(adev->dev, &dev_attr_pp_force_state);
		device_remove_file(adev->dev, &dev_attr_pp_table);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	}
}

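/*
 * Central re-evaluation point, called whenever the display
 * configuration, UVD/VCE activity or power source changes: refresh
 * the active crtc mask and AC/DC status, then let DPM pick and
 * program the appropriate power state.
 */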
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pp_enabled) {
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->pp_enabled) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}