amdgpu_pm.c

/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

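/**
 * amdgpu_pm_acpi_event_handler - handle ACPI AC/DC power source events
 * @adev: amdgpu device pointer
 *
 * Cache whether the system runs on AC or battery power and, when the
 * non-powerplay DPM implementation provides enable_bapm, forward the
 * new state to it. The powerplay path is still a TODO.
 */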
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pp_enabled)
		/* TODO */
		return;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

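/*
 * power_dpm_state (sysfs, RW): the DPM state heuristic. Reads return
 * "battery", "balanced" or "performance". Writes dispatch a powerplay
 * event when powerplay is enabled; otherwise they update
 * adev->pm.dpm.user_state and recompute the clocks, unless a PX card
 * is currently powered off.
 */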
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->pp_enabled)
		pm = amdgpu_dpm_get_current_power_state(adev);
	else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}

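/*
 * power_dpm_force_performance_level (sysfs, RW): reads return "off",
 * "auto", "low" or "high". Writes force the requested level; on the
 * non-powerplay path the write is rejected while a thermal state is
 * active.
 */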
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	if (adev->pp_enabled) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);

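/*
 * hwmon interface: temp1_input and the temp1_crit/temp1_crit_hyst
 * limits are reported in millidegrees Celsius per the hwmon ABI;
 * pwm1 exposes the fan speed on the usual 0-255 scale, converted
 * to and from the percentage the DPM callbacks work with.
 */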
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

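/*
 * pwm1_enable follows the hwmon convention: writing 1 selects manual
 * (static PWM) fan control and any other value returns control to the
 * firmware; reads never report 0 (full speed) since the fan is always
 * either fuse- or smc-controlled.
 */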
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	/* scale the 0-255 hwmon value to a percentage */
	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	/* scale the percentage back to the 0-255 hwmon range */
	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

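/*
 * Trim the visible hwmon attributes to what the asic supports: the
 * limit and fan files are hidden without DPM or without a fan, and
 * the read/write permission bits are dropped when the corresponding
 * fan query/control callbacks are missing.
 */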
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

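/**
 * amdgpu_dpm_thermal_work_handler - worker for the thermal interrupt
 * @work: work struct embedded in struct amdgpu_device
 *
 * Switch to the internal thermal power state while the GPU runs hot,
 * and back to the user-selected state once the temperature drops below
 * the minimum (or on a high-to-low trip for asics without a
 * temperature callback).
 */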
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

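/*
 * Pick the best matching power state for the requested dpm_state:
 * honour single-display-only states, map performance to the older
 * 3D-performance state where one exists, and fall back through
 * progressively more generic states when nothing matches directly.
 */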
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

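/*
 * Core state-switch routine, called with adev->pm.mutex held: pick the
 * requested power state, bail out early when nothing relevant changed,
 * otherwise drain the rings, program the new state and reapply any
 * forced performance level.
 */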
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	mutex_lock(&adev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;

			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&adev->ring_lock);
}

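/**
 * amdgpu_dpm_enable_uvd - power the UVD block up or down
 * @adev: amdgpu device pointer
 * @enable: true when UVD is about to be used
 *
 * Uses UVD powergating when the asic supports it; otherwise selects or
 * leaves the internal UVD power state and recomputes the clocks.
 * amdgpu_dpm_enable_vce below does the same for the VCE block.
 */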
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_uvd(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_uvd) {
			mutex_lock(&adev->pm.mutex);
			/* enable/disable UVD */
			amdgpu_dpm_powergate_uvd(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = true;
				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_vce(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_vce) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_powergate_vce(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = true;
				/* XXX select vce level based on ring/task */
				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->pp_enabled)
		/* TO DO */
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

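/**
 * amdgpu_pm_sysfs_init - register the pm sysfs, hwmon and debugfs files
 * @adev: amdgpu device pointer
 *
 * Safe to call repeatedly; a no-op once initialized, and skipped
 * entirely when neither powerplay nor a temperature callback is
 * available.
 */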
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!adev->pp_enabled) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm forced performance level\n");
		return ret;
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
}

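/**
 * amdgpu_pm_compute_clocks - respond to a display configuration change
 * @adev: amdgpu device pointer
 *
 * On the powerplay path, update the display bandwidth, drain the rings
 * and dispatch a display-config-change task; otherwise recount the
 * active crtcs, refresh the AC/battery status and re-evaluate the
 * power state under adev->pm.mutex.
 */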
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pp_enabled) {
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		mutex_lock(&adev->ring_lock);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}
		mutex_unlock(&adev->ring_lock);

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->pp_enabled) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}