amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"
#include "vid.h"

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->stream) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->stream) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_stream_get_scanoutpos(acrtc->stream, vbl, position);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check is inherited from both callers of this
	 * function. It still needs to be investigated why this case can
	 * happen at all.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wake up userspace */
	if (amdgpu_crtc->event
	    && amdgpu_crtc->event->event.base.type
	    == DRM_EVENT_FLIP_COMPLETE) {
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;
	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
}
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc)
		DRM_INFO("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
			"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
			 adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual number of CRTCs in use */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}

void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	dc_destroy(&adev->dm.dc);

	return;
}
/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
				 aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				/* break instead of returning here, so the
				 * connection_mutex is not leaked on error */
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
	int r = detect_mst_link_for_all_connectors(dev);

	return r;
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {
			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);

	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3
		);

	return ret;
}
struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(
		state,
		connector,
		conn_state,
		i) {
		crtc_from_state =
			from_state_var ?
				conn_state->crtc :
				connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D0
		);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			    &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Force a mode set in the atomic commit */
	for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
		crtc_state->active_changed = true;

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	amdgpu_dm_irq_resume(adev);

	return ret;
}
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/* TODO: it is temporarily non-const, should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;

	/* The EDID-managed connector gets its first update only in the
	 * mode_valid hook, and then the connector sink is set to either the
	 * fake or the physical sink depending on link status.
	 * Don't do it here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For headless S3 resume, use the emulated sink to fake a
		 * stream, because connector->dc_sink is set to NULL on resume.
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
						connector);
				/* The retain and release below bump up the
				 * refcount for the sink, because the link no
				 * longer points to it after disconnect, so on
				 * the next crtc-to-connector reshuffle by the
				 * UMD we would otherwise get an unwanted
				 * dc_sink release.
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
			 aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}
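
/*
 * A minimal sketch (not driver code) of the sink-handover invariant the
 * comments above describe: the connector owns exactly one reference to its
 * current sink, and the emulated sink is never released here. The helper
 * name example_sink_handover is hypothetical.
 */
#if 0
static void example_sink_handover(struct amdgpu_connector *aconnector,
				  const struct dc_sink *new_sink)
{
	/* Drop our reference to the old sink, unless it is the emulated one. */
	if (aconnector->dc_sink && aconnector->dc_sink != aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_sink);

	/* Adopt the new sink; the link no longer points to it after
	 * disconnect, so the reference we keep here is what prevents a
	 * premature dc_sink release on the next crtc/connector reshuffle. */
	aconnector->dc_sink = new_sink;
}
#endif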
static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or
	 * notify the OS since (for the MST case) MST does this in its own
	 * context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;
	const int max_process_count = 30;
	int process_count = 0;
	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new IRQ to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporarily take a mutex so the HPD interrupt does not hit a
	 * GPIO conflict; once an i2c helper is implemented, this mutex should
	 * be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			    &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}
/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with the base driver.
	 *    The base driver will call set() to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    The base driver will call amdgpu_dm_irq_handler() for ALL
	 *    interrupts coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
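
/*
 * A minimal sketch, following the amdgpu_irq_add_id() contract described in
 * the comment block above, of wiring one extra interrupt source into the DM
 * handler. The source id SRC_ID_EXAMPLE and the handler example_high_irq are
 * hypothetical and not part of this driver.
 */
#if 0
static int example_register_one_irq(struct amdgpu_device *adev)
{
	struct dc_interrupt_params int_params = {0};
	int r;

	/* 1. Let the base driver own enable/disable for this source. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY,
			      SRC_ID_EXAMPLE, &adev->crtc_irq);
	if (r)
		return r;

	/* 2. Route the interrupt through amdgpu_dm_irq_handler() into DC. */
	int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
	int_params.irq_source = dc_interrupt_to_irq_source(adev->dm.dc,
							   SRC_ID_EXAMPLE, 0);
	amdgpu_dm_irq_register_interrupt(adev, &int_params,
					 example_high_irq, adev);

	return 0;
}
#endif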
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (NULL == dm->backlight_dev)
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif
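
/*
 * Hedged usage note: once registered, userspace drives the device above
 * through /sys/class/backlight/amdgpu_blN/brightness; the backlight core
 * then invokes amdgpu_dm_backlight_update_status(), roughly as sketched
 * here (illustrative only, new_level is a placeholder).
 */
#if 0
	bd->props.brightness = new_level;	/* value from the sysfs write */
	backlight_update_status(bd);		/* calls .update_status above */
#endif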
/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector;
	struct amdgpu_encoder *aencoder;
	struct amdgpu_crtc *acrtc;
	uint32_t link_cnt;

	link_cnt = dm->dc->caps.max_links;

	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++) {
		acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
		if (!acrtc)
			goto fail;

		if (amdgpu_dm_crtc_init(
			dm,
			acrtc,
			i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			kfree(acrtc);
			goto fail;
		}
	}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail_free_connector;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_connector;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			return -1;
		}
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -1;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;
fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail:
	return -1;
}
void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	return;
}
/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				   u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}
/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 *		  via DRM IOCTL, by user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	const struct dc_stream *stream;
	struct dc_flip_addrs addr = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should be guarded by the dal_mutex but we can't do this since
	 * the caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might come,
	 * spin on the event_lock, disabling interrupts while it does so. At
	 * this point the core can no longer be pre-empted and return to the
	 * thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just take
	 * a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->stream after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 */

	acrtc = adev->mode_info.crtcs[crtc_id];
	stream = acrtc->stream;

	if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
		/* In commit tail framework this cannot happen */
		BUG_ON(0);
	}

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be clean-up on reset.
	 */
	if (!stream) {
		WARN_ON(1);
		return;
	}

	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	addr.flip_immediate = async;

	if (acrtc->base.state->event &&
	    acrtc->base.state->event->event.base.type ==
	    DRM_EVENT_FLIP_COMPLETE) {
		acrtc->event = acrtc->base.state->event;

		/* Set the flip status */
		acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

		/* Mark this event as consumed */
		acrtc->base.state->event = NULL;
	}

	dc_flip_surface_addrs(adev->dm.dc,
			      dc_stream_get_status(stream)->surfaces,
			      &addr, 1);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);
}
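
/*
 * A hedged sketch of the work-queue serialization floated in the TODO above:
 * funnel flip requests through one ordered work item so nothing races on
 * acrtc->stream, and take dal_mutex in process context where blocking is
 * safe. The struct dm_flip_work and dm_flip_work_func() are hypothetical,
 * not part of this driver.
 */
#if 0
struct dm_flip_work {
	struct work_struct work;
	struct amdgpu_device *adev;
	int crtc_id;
	u64 crtc_base;
	bool async;
};

static void dm_flip_work_func(struct work_struct *work)
{
	struct dm_flip_work *fw = container_of(work, struct dm_flip_work, work);

	/* Runs in process context: safe to sleep on dal_mutex here, so
	 * acrtc->stream cannot change under us while we flip. */
	mutex_lock(&fw->adev->dm.dal_mutex);
	dm_page_flip(fw->adev, fw->crtc_id, fw->crtc_base, fw->async);
	mutex_unlock(&fw->adev->dm.dal_mutex);

	kfree(fw);
}
#endif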
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
	struct mod_freesync_params freesync_params;
	uint8_t num_streams;
	uint8_t i;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */
	num_streams = dc_get_current_stream_count(adev->dm.dc);

	for (i = 0; i < num_streams; i++) {
		const struct dc_stream *stream;

		stream = dc_get_stream_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  &stream, 1, &freesync_params);
	}

	return r;
}
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level, /* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level, /* called unconditionally */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};
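
/*
 * Hedged usage sketch: the amdgpu core reaches the hooks above through
 * adev->mode_info.funcs, which dm_early_init() below points at
 * dm_display_funcs. Illustrative call sites only; crtc_id, crtc_base and
 * async are placeholders.
 */
#if 0
	u32 vblanks = adev->mode_info.funcs->vblank_get_counter(adev, crtc_id);

	adev->mode_info.funcs->page_flip(adev, crtc_id, crtc_base, async);
#endif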
#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
	struct device *device,
	struct device_attribute *attr,
	const char *buf,
	size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}