/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

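/**
 * amdgpu_pm_acpi_event_handler - handle ACPI AC/DC power events
 * @adev: amdgpu_device pointer
 *
 * Refresh the cached AC/DC power status from the power supply core and,
 * if the asic implements BAPM (bidirectional application power
 * management), pass the new status on to the DPM code.  Does nothing
 * yet when the powerplay backend is in use.
 */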
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pp_enabled)
		/* TODO */
		return;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

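/*
 * power_dpm_state: sysfs file exposing the requested power state.
 * Reads return "battery", "balanced" or "performance"; writes of one of
 * those strings select the corresponding state.  With powerplay the
 * request is dispatched to the event handler, otherwise the legacy DPM
 * user state is updated and the clocks are recomputed.
 */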
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->pp_enabled)
		pm = amdgpu_dpm_get_current_power_state(adev);
	else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}

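/*
 * power_dpm_force_performance_level: sysfs file for pinning the
 * performance level.  Reads return "auto", "low" or "high"; writes of
 * one of those strings force the corresponding level.  Forcing is
 * rejected while a thermal state override is active.
 */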
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_enabled) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			/* drop the lock before bailing out; the fail path
			 * must not unlock a mutex it never took.
			 */
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);

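/*
 * hwmon interface: temp1_input/temp1_crit/temp1_crit_hyst report the
 * GPU temperature and thermal limits in the standard hwmon unit of
 * millidegrees Celsius; pwm1 and friends expose fan control on the
 * hwmon 0-255 scale, converted to the 0-100 percent range used by the
 * DPM fan helpers.
 */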
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int temp;

	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	/* scale the 0-255 hwmon range to the 0-100 percent the dpm code expects */
	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

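/**
 * amdgpu_dpm_thermal_work_handler - worker for the thermal interrupt
 * @work: work struct embedded in struct amdgpu_device
 *
 * Switch to the internal thermal power state when the GPU runs hot and
 * back to the user selected state once the temperature drops below the
 * minimum threshold (or on the high-to-low interrupt for asics without
 * a temperature callback), then recompute the clocks.
 */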
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

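/**
 * amdgpu_dpm_pick_power_state - select the best matching power state
 * @adev: amdgpu_device pointer
 * @dpm_state: requested power state class
 *
 * Walk the power state table looking for a state that matches the
 * requested class, preferring single-display states when only one crtc
 * is active.  If nothing matches, progressively fall back to less
 * specific classes until a usable state (or NULL) is found.
 */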
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

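/**
 * amdgpu_dpm_change_power_state_locked - switch to the requested state
 * @adev: amdgpu_device pointer
 *
 * Pick the target power state, skip the switch when nothing relevant
 * changed, and otherwise reprogram it: update display watermarks and
 * configuration, drain the rings, program the new state, and reapply
 * any forced performance level.  Caller must hold adev->pm.mutex.
 */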
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}

	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	mutex_lock(&adev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);
	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&adev->ring_lock);
}

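/**
 * amdgpu_dpm_enable_uvd - power up/down UVD for video decode
 * @adev: amdgpu_device pointer
 * @enable: true to enable UVD, false to disable it
 *
 * Use powergating when the backend supports it; otherwise fall back to
 * selecting/deselecting the internal UVD power state and recomputing
 * the clocks.
 */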
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_uvd(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_uvd) {
			mutex_lock(&adev->pm.mutex);
			/* enable/disable UVD */
			amdgpu_dpm_powergate_uvd(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = true;
				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

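/**
 * amdgpu_dpm_enable_vce - power up/down VCE for video encode
 * @adev: amdgpu_device pointer
 * @enable: true to enable VCE, false to disable it
 *
 * Same scheme as UVD: powergate if the backend supports it, otherwise
 * track VCE activity in the DPM state and recompute the clocks.
 */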
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_vce(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_vce) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_powergate_vce(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = true;
				/* XXX select vce level based on ring/task */
				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

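/**
 * amdgpu_pm_print_power_states - dump all power states to the kernel log
 * @adev: amdgpu_device pointer
 */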
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->pp_enabled)
		/* TODO */
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

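/**
 * amdgpu_pm_sysfs_init - register the pm sysfs, hwmon and debugfs files
 * @adev: amdgpu_device pointer
 *
 * Returns 0 on success or a negative error code on failure.
 */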
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!adev->pp_enabled) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm forced performance level\n");
		return ret;
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

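/**
 * amdgpu_pm_sysfs_fini - tear down what amdgpu_pm_sysfs_init registered
 * @adev: amdgpu_device pointer
 */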
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
}

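/**
 * amdgpu_pm_compute_clocks - re-evaluate clocks for the current load
 * @adev: amdgpu_device pointer
 *
 * Called on display changes and UVD/VCE state changes.  With powerplay
 * the display config change event is dispatched after draining the
 * rings; otherwise the active crtc mask and AC/DC status are refreshed
 * and a power state switch is triggered under adev->pm.mutex.
 */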
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pp_enabled) {
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		mutex_lock(&adev->ring_lock);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}
		mutex_unlock(&adev->ring_lock);

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if (adev->pp_enabled) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}