amdgpu_pm.c

/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
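/*
 * Called from the driver's ACPI notifier on AC/DC power-source events:
 * refresh the cached ac_power flag and, on asics that implement the
 * hook, retune BAPM (bidirectional application power management) for
 * the new supply.
 */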
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (amdgpu_powerplay)
		/* TODO */
		return;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

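/*
 * power_dpm_state: sysfs file exposing the dpm state, one of "battery",
 * "balanced" or "performance", used to bias power-state selection.
 * For example, from a shell (the card index may differ per system):
 *
 *   cat /sys/class/drm/card0/device/power_dpm_state
 *   echo battery > /sys/class/drm/card0/device/power_dpm_state
 */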
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (amdgpu_powerplay) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (amdgpu_powerplay) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}

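/*
 * power_dpm_force_performance_level: sysfs override for the dpm
 * heuristics.  "auto" lets dpm select clocks dynamically; "low" and
 * "high" pin the lowest or highest power state.  E.g. (path may vary):
 *
 *   echo high > /sys/class/drm/card0/device/power_dpm_force_performance_level
 */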
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (amdgpu_powerplay) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (amdgpu_powerplay)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);

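/*
 * The hwmon handlers below follow the standard sysfs-hwmon ABI:
 * temp1_input is reported in millidegrees Celsius and pwm1 uses the
 * conventional 0-255 range, converted to and from the percent-based
 * fan interface the dpm backends use internally.
 */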
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int temp;

	if (adev->pm.funcs->get_temperature)
		temp = amdgpu_dpm_get_temperature(adev);
	else
		temp = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (adev->pm.funcs->get_fan_control_mode)
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

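/*
 * pwm1 is bounded by the fixed min/max below.  The setter maps the
 * 0-255 hwmon range onto 0-100 percent before handing the value to the
 * asic code, and the getter does the inverse, so a small rounding loss
 * on readback is possible.
 */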
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	if (amdgpu_powerplay)
		return 0; /* TODO */

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

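/*
 * Queued from thermal interrupts: hold the internal THERMAL state while
 * the temperature stays at or above thermal.min_temp (or until a
 * high-to-low trip on asics without a temperature query), then drop
 * back to the user state and recompute clocks.
 */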
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

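/*
 * Power-state selection: walk the power-state table for a state whose
 * UI class matches the request, honouring single-display-only states,
 * then degrade through the fallback ladder at the bottom (e.g. UVD SD
 * -> UVD HD -> performance) until something matches.
 */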
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2);

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

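/*
 * Core state switch, called with pm.mutex held: picks the target state,
 * short-circuits when nothing relevant changed, and otherwise drains
 * the rings and reprograms the hardware through the pre/set/post
 * power-state hooks.
 */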
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	mutex_lock(&adev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);
	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&adev->ring_lock);
}

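/*
 * UVD/VCE enable helpers: asics with powergating hooks gate the block
 * directly; older ones instead steer dpm at a dedicated UVD state, or
 * flag VCE activity, and then recompute clocks.
 */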
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (amdgpu_powerplay)
		amdgpu_dpm_powergate_uvd(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_uvd) {
			mutex_lock(&adev->pm.mutex);
			/* enable/disable UVD */
			amdgpu_dpm_powergate_uvd(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = true;
				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (amdgpu_powerplay)
		amdgpu_dpm_powergate_vce(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_vce) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_powergate_vce(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = true;
				/* XXX select vce level based on ring/task */
				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (amdgpu_powerplay)
		/* TODO */
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

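/*
 * Registers the hwmon device plus the power_dpm_* sysfs files and the
 * debugfs entry.  Safe to call more than once: sysfs_initialized
 * short-circuits repeat calls.
 */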
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!amdgpu_powerplay) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm forced performance level\n");
		return ret;
	}
	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
}

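/*
 * Re-evaluate clocks after display or load changes.  On the powerplay
 * path this dispatches a display-config-change event; on the legacy
 * path it recounts the active crtcs, refreshes the AC/battery status
 * and runs the state switch under pm.mutex.
 */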
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	if (amdgpu_powerplay) {
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		mutex_lock(&adev->ring_lock);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}
		mutex_unlock(&adev->ring_lock);

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if (amdgpu_powerplay) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}