amdgpu_dm.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513
  1. /*
  2. * Copyright 2015 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: AMD
  23. *
  24. */
  25. #include "dm_services_types.h"
  26. #include "dc.h"
  27. #include "vid.h"
  28. #include "amdgpu.h"
  29. #include "amdgpu_display.h"
  30. #include "atom.h"
  31. #include "amdgpu_dm.h"
  32. #include "amdgpu_dm_types.h"
  33. #include "amd_shared.h"
  34. #include "amdgpu_dm_irq.h"
  35. #include "dm_helpers.h"
  36. #include "ivsrcid/ivsrcid_vislands30.h"
  37. #include <linux/module.h>
  38. #include <linux/moduleparam.h>
  39. #include <linux/version.h>
  40. #include <drm/drm_atomic.h>
  41. #include <drm/drm_atomic_helper.h>
  42. #include <drm/drm_dp_mst_helper.h>
  43. #include "modules/inc/mod_freesync.h"
  44. #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
  45. #include "ivsrcid/irqsrcs_dcn_1_0.h"
  46. #include "raven1/DCN/dcn_1_0_offset.h"
  47. #include "raven1/DCN/dcn_1_0_sh_mask.h"
  48. #include "vega10/soc15ip.h"
  49. #include "soc15_common.h"
  50. #endif
/*
 * Per-ASIC tables mapping hardware plane index -> DRM plane type.
 * Arrays are AMDGPU_MAX_PLANES long; entries not listed explicitly are
 * zero-initialized by C aggregate-initialization rules.
 */
static enum drm_plane_type dm_surfaces_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};

/* Carrizo table ("carizzo" is a historical typo kept in the identifier). */
static enum drm_plane_type dm_surfaces_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
};

static enum drm_plane_type dm_surfaces_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};
  70. /*
  71. * dm_vblank_get_counter
  72. *
  73. * @brief
  74. * Get counter for number of vertical blanks
  75. *
  76. * @param
  77. * struct amdgpu_device *adev - [in] desired amdgpu device
  78. * int disp_idx - [in] which CRTC to get the counter from
  79. *
  80. * @return
  81. * Counter for vertical blanks
  82. */
  83. static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
  84. {
  85. if (crtc >= adev->mode_info.num_crtc)
  86. return 0;
  87. else {
  88. struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
  89. if (NULL == acrtc->stream) {
  90. DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
  91. return 0;
  92. }
  93. return dc_stream_get_vblank_counter(acrtc->stream);
  94. }
  95. }
  96. static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
  97. u32 *vbl, u32 *position)
  98. {
  99. uint32_t v_blank_start, v_blank_end, h_position, v_position;
  100. if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
  101. return -EINVAL;
  102. else {
  103. struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
  104. if (NULL == acrtc->stream) {
  105. DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
  106. return 0;
  107. }
  108. /*
  109. * TODO rework base driver to use values directly.
  110. * for now parse it back into reg-format
  111. */
  112. dc_stream_get_scanoutpos(acrtc->stream,
  113. &v_blank_start,
  114. &v_blank_end,
  115. &h_position,
  116. &v_position);
  117. *position = v_position | (h_position << 16);
  118. *vbl = v_blank_start | (v_blank_end << 16);
  119. }
  120. return 0;
  121. }
/* amd_ip_funcs stubs: DM has no idle/soft-reset handling implemented yet. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
/*
 * Map an OTG (output timing generator) instance back to its amdgpu_crtc
 * by walking the DRM CRTC list.  Returns NULL when no CRTC matches.
 */
static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * following if is check inherited from both functions where this one is
	 * used now. Need to be checked why it could happen.
	 */
	if (otg_inst == -1) {
		/* Warn but fall back to CRTC 0 rather than crashing the IRQ path. */
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
/*
 * Page-flip completion interrupt handler (interrupt context).
 * Sends the pending vblank event for the flipped CRTC and clears the
 * pflip state, all under the DRM event_lock.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/*TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	/* Spurious pflip interrupt: no flip was submitted on this CRTC. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
			amdgpu_crtc->pflip_status,
			AMDGPU_FLIP_SUBMITTED,
			amdgpu_crtc->crtc_id,
			amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wakeup userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;
	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	/* Drop the vblank reference taken when the flip was submitted. */
	drm_crtc_vblank_put(&amdgpu_crtc->base);
}
  201. static void dm_crtc_high_irq(void *interrupt_params)
  202. {
  203. struct common_irq_params *irq_params = interrupt_params;
  204. struct amdgpu_device *adev = irq_params->adev;
  205. uint8_t crtc_index = 0;
  206. struct amdgpu_crtc *acrtc;
  207. acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
  208. if (acrtc)
  209. crtc_index = acrtc->crtc_id;
  210. drm_handle_vblank(adev->ddev, crtc_index);
  211. }
/* Clock/power gating is managed by DC internally; nothing to do here. */
static int dm_set_clockgating_state(void *handle,
		enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		enum amd_powergating_state state)
{
	return 0;
}
  222. /* Prototypes of private functions */
  223. static int dm_early_init(void* handle);
  224. static void hotplug_notify_work_func(struct work_struct *work)
  225. {
  226. struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
  227. struct drm_device *dev = dm->ddev;
  228. drm_kms_helper_hotplug_event(dev);
  229. }
  230. /* Init display KMS
  231. *
  232. * Returns 0 on success
  233. */
  234. int amdgpu_dm_init(struct amdgpu_device *adev)
  235. {
  236. struct dc_init_data init_data;
  237. adev->dm.ddev = adev->ddev;
  238. adev->dm.adev = adev;
  239. DRM_INFO("DAL is enabled\n");
  240. /* Zero all the fields */
  241. memset(&init_data, 0, sizeof(init_data));
  242. /* initialize DAL's lock (for SYNC context use) */
  243. spin_lock_init(&adev->dm.dal_lock);
  244. /* initialize DAL's mutex */
  245. mutex_init(&adev->dm.dal_mutex);
  246. if(amdgpu_dm_irq_init(adev)) {
  247. DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
  248. goto error;
  249. }
  250. init_data.asic_id.chip_family = adev->family;
  251. init_data.asic_id.pci_revision_id = adev->rev_id;
  252. init_data.asic_id.hw_internal_rev = adev->external_rev_id;
  253. init_data.asic_id.vram_width = adev->mc.vram_width;
  254. /* TODO: initialize init_data.asic_id.vram_type here!!!! */
  255. init_data.asic_id.atombios_base_address =
  256. adev->mode_info.atom_context->bios;
  257. init_data.driver = adev;
  258. adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
  259. if (!adev->dm.cgs_device) {
  260. DRM_ERROR("amdgpu: failed to create cgs device.\n");
  261. goto error;
  262. }
  263. init_data.cgs_device = adev->dm.cgs_device;
  264. adev->dm.dal = NULL;
  265. init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
  266. /* Display Core create. */
  267. adev->dm.dc = dc_create(&init_data);
  268. if (!adev->dm.dc)
  269. DRM_INFO("Display Core failed to initialize!\n");
  270. INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
  271. adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
  272. if (!adev->dm.freesync_module) {
  273. DRM_ERROR(
  274. "amdgpu: failed to initialize freesync_module.\n");
  275. } else
  276. DRM_INFO("amdgpu: freesync_module init done %p.\n",
  277. adev->dm.freesync_module);
  278. if (amdgpu_dm_initialize_drm_device(adev)) {
  279. DRM_ERROR(
  280. "amdgpu: failed to initialize sw for display support.\n");
  281. goto error;
  282. }
  283. /* Update the actual used number of crtc */
  284. adev->mode_info.num_crtc = adev->dm.display_indexes_num;
  285. /* TODO: Add_display_info? */
  286. /* TODO use dynamic cursor width */
  287. adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
  288. adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
  289. if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
  290. DRM_ERROR(
  291. "amdgpu: failed to initialize sw for display support.\n");
  292. goto error;
  293. }
  294. DRM_INFO("KMS initialized.\n");
  295. return 0;
  296. error:
  297. amdgpu_dm_fini(adev);
  298. return -1;
  299. }
/*
 * Tear down everything amdgpu_dm_init() created.  Safe to call on a
 * partially-initialized DM: each resource is checked before release.
 */
void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vlank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	return;
}
  321. /* moved from amdgpu_dm_kms.c */
  322. void amdgpu_dm_destroy()
  323. {
  324. }
/* No software state beyond amdgpu_dm_init()/fini(); these hooks are no-ops. */
static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}
  333. static int detect_mst_link_for_all_connectors(struct drm_device *dev)
  334. {
  335. struct amdgpu_connector *aconnector;
  336. struct drm_connector *connector;
  337. int ret = 0;
  338. drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
  339. list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
  340. aconnector = to_amdgpu_connector(connector);
  341. if (aconnector->dc_link->type == dc_connection_mst_branch) {
  342. DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
  343. aconnector, aconnector->base.base.id);
  344. ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
  345. if (ret < 0) {
  346. DRM_ERROR("DM_MST: Failed to start MST\n");
  347. ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
  348. return ret;
  349. }
  350. }
  351. }
  352. drm_modeset_unlock(&dev->mode_config.connection_mutex);
  353. return ret;
  354. }
  355. static int dm_late_init(void *handle)
  356. {
  357. struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
  358. int r = detect_mst_link_for_all_connectors(dev);
  359. return r;
  360. }
  361. static void s3_handle_mst(struct drm_device *dev, bool suspend)
  362. {
  363. struct amdgpu_connector *aconnector;
  364. struct drm_connector *connector;
  365. drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
  366. list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
  367. aconnector = to_amdgpu_connector(connector);
  368. if (aconnector->dc_link->type == dc_connection_mst_branch &&
  369. !aconnector->mst_port) {
  370. if (suspend)
  371. drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
  372. else
  373. drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
  374. }
  375. }
  376. drm_modeset_unlock(&dev->mode_config.connection_mutex);
  377. }
  378. static int dm_hw_init(void *handle)
  379. {
  380. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  381. /* Create DAL display manager */
  382. amdgpu_dm_init(adev);
  383. amdgpu_dm_hpd_init(adev);
  384. return 0;
  385. }
/* Reverse of dm_hw_init(): tear down HPD, DM IRQ handling, then the DM. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);

	amdgpu_dm_fini(adev);
	return 0;
}
/*
 * S3 suspend: quiesce MST, disable DM interrupts, cache the atomic state
 * for restore in amdgpu_dm_display_resume(), then put DC into D3.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	/* Saved state is consumed by drm_atomic_helper_resume() later. */
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3
		);

	return ret;
}
  408. struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
  409. struct drm_atomic_state *state,
  410. struct drm_crtc *crtc,
  411. bool from_state_var)
  412. {
  413. uint32_t i;
  414. struct drm_connector_state *conn_state;
  415. struct drm_connector *connector;
  416. struct drm_crtc *crtc_from_state;
  417. for_each_connector_in_state(
  418. state,
  419. connector,
  420. conn_state,
  421. i) {
  422. crtc_from_state =
  423. from_state_var ?
  424. conn_state->crtc :
  425. connector->state->crtc;
  426. if (crtc_from_state == crtc)
  427. return to_amdgpu_connector(connector);
  428. }
  429. return NULL;
  430. }
  431. static int dm_resume(void *handle)
  432. {
  433. struct amdgpu_device *adev = handle;
  434. struct amdgpu_display_manager *dm = &adev->dm;
  435. /* power on hardware */
  436. dc_set_power_state(
  437. dm->dc,
  438. DC_ACPI_CM_POWER_STATE_D0
  439. );
  440. return 0;
  441. }
/*
 * Display-side S3 resume: re-detect all links and restore the atomic
 * state cached by dm_suspend().  Returns 0 on success or the error from
 * drm_atomic_helper_resume().
 */
int amdgpu_dm_display_resume(struct amdgpu_device *adev )
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, false);
		/* Drop the stale sink; detection below repopulates it. */
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
		crtc_state->active_changed = true;

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}
/* amd_ip_funcs implementation for the DM (display) IP block. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
/* IP block registration entry for the DCE/DM display block, v1.0. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
/* DRM mode-config vtable for the DM atomic path. */
/* TODO: it is temporary non-const, should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit
};

/* Commit-tail override so DM controls the ordering of the HW programming. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
/*
 * Synchronize the DRM connector with the result of a dc_link detection:
 * update dc_sink ownership, the EDID property and freesync registration.
 * NOTE(review): the HPD call sites in this file hold aconnector->hpd_lock
 * when calling — confirm the same holds for any other callers.
 */
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Don't do it here if we are during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
								connector);
				/* retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}
/*
 * Long-pulse HPD handler: re-run link detection and, on a change, sync
 * the connector state and notify userspace of the hotplug.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		/* Only notify for real (non-forced) connectors. */
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}
/*
 * Service a DP short-pulse IRQ for an MST-capable link: read the
 * sink-count/ESI DPCD block, hand it to the MST manager, ACK at DPCD,
 * then re-read until no new IRQ is pending (bounded by max_process_count).
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	/* Pre-DP1.2 sinks expose the IRQ vector at 0x200; DP1.2+ use ESI. */
	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* Write may fail transiently on AUX; retry up to 3 times. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}
/*
 * Short-pulse (HPD-RX) handler.  For non-MST links a downstream status
 * change triggers re-detection; MST/trained links defer ESI processing
 * to dm_handle_hpd_rx_irq().
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}
  718. static void register_hpd_handlers(struct amdgpu_device *adev)
  719. {
  720. struct drm_device *dev = adev->ddev;
  721. struct drm_connector *connector;
  722. struct amdgpu_connector *aconnector;
  723. const struct dc_link *dc_link;
  724. struct dc_interrupt_params int_params = {0};
  725. int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
  726. int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
  727. list_for_each_entry(connector,
  728. &dev->mode_config.connector_list, head) {
  729. aconnector = to_amdgpu_connector(connector);
  730. dc_link = aconnector->dc_link;
  731. if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
  732. int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
  733. int_params.irq_source = dc_link->irq_source_hpd;
  734. amdgpu_dm_irq_register_interrupt(adev, &int_params,
  735. handle_hpd_irq,
  736. (void *) aconnector);
  737. }
  738. if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
  739. /* Also register for DP short pulse (hpd_rx). */
  740. int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
  741. int_params.irq_source = dc_link->irq_source_hpd_rx;
  742. amdgpu_dm_irq_register_interrupt(adev, &int_params,
  743. handle_hpd_rx_irq,
  744. (void *) aconnector);
  745. }
  746. }
  747. }
/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	/* VEGA10 and RAVEN deliver display interrupts via the DCE IH client. */
	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-source context handed back to dm_crtc_high_irq(). */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	/* NOTE(review): step of 2 — pflip source IDs appear to be interleaved
	 * on VI; confirm against the vid.h SRCID table. */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
  814. #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 * */

	/* Use VSTARTUP interrupt */
	/* One VSTARTUP source per CRTC, contiguous from the D1 source ID. */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-source context handed back to dm_crtc_high_irq(). */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	/* One flip source per HUBP, contiguous from HUBP0. */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
  881. #endif
  882. static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
  883. {
  884. int r;
  885. adev->mode_info.mode_config_initialized = true;
  886. adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
  887. adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
  888. adev->ddev->mode_config.max_width = 16384;
  889. adev->ddev->mode_config.max_height = 16384;
  890. adev->ddev->mode_config.preferred_depth = 24;
  891. adev->ddev->mode_config.prefer_shadow = 1;
  892. /* indicate support of immediate flip */
  893. adev->ddev->mode_config.async_page_flip = true;
  894. adev->ddev->mode_config.fb_base = adev->mc.aper_base;
  895. r = amdgpu_modeset_create_props(adev);
  896. if (r)
  897. return r;
  898. return 0;
  899. }
  900. #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
  901. defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
  902. static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
  903. {
  904. struct amdgpu_display_manager *dm = bl_get_data(bd);
  905. if (dc_link_set_backlight_level(dm->backlight_link,
  906. bd->props.brightness, 0, 0))
  907. return 0;
  908. else
  909. return 1;
  910. }
/*
 * backlight_ops.get_brightness callback: report the last requested level
 * from the backlight properties; the hardware is not queried.
 */
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}
/* Callback table wiring the DM backlight helpers into the backlight class. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};
  919. void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
  920. {
  921. char bl_name[16];
  922. struct backlight_properties props = { 0 };
  923. props.max_brightness = AMDGPU_MAX_BL_LEVEL;
  924. props.type = BACKLIGHT_RAW;
  925. snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
  926. dm->adev->ddev->primary->index);
  927. dm->backlight_dev = backlight_device_register(bl_name,
  928. dm->adev->ddev->dev,
  929. dm,
  930. &amdgpu_dm_backlight_ops,
  931. &props);
  932. if (NULL == dm->backlight_dev)
  933. DRM_ERROR("DM: Backlight registration failed!\n");
  934. else
  935. DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
  936. }
  937. #endif
/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
  945. int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
  946. {
  947. struct amdgpu_display_manager *dm = &adev->dm;
  948. uint32_t i;
  949. struct amdgpu_connector *aconnector = NULL;
  950. struct amdgpu_encoder *aencoder = NULL;
  951. struct amdgpu_mode_info *mode_info = &adev->mode_info;
  952. uint32_t link_cnt;
  953. unsigned long possible_crtcs;
  954. link_cnt = dm->dc->caps.max_links;
  955. if (amdgpu_dm_mode_config_init(dm->adev)) {
  956. DRM_ERROR("DM: Failed to initialize mode config\n");
  957. return -1;
  958. }
  959. for (i = 0; i < dm->dc->caps.max_surfaces; i++) {
  960. mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
  961. GFP_KERNEL);
  962. if (!mode_info->planes[i]) {
  963. DRM_ERROR("KMS: Failed to allocate surface\n");
  964. goto fail_free_planes;
  965. }
  966. mode_info->planes[i]->base.type = mode_info->plane_type[i];
  967. /*
  968. * HACK: IGT tests expect that each plane can only have one
  969. * one possible CRTC. For now, set one CRTC for each
  970. * plane that is not an underlay, but still allow multiple
  971. * CRTCs for underlay planes.
  972. */
  973. possible_crtcs = 1 << i;
  974. if (i >= dm->dc->caps.max_streams)
  975. possible_crtcs = 0xff;
  976. if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
  977. DRM_ERROR("KMS: Failed to initialize plane\n");
  978. goto fail_free_planes;
  979. }
  980. }
  981. for (i = 0; i < dm->dc->caps.max_streams; i++)
  982. if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
  983. DRM_ERROR("KMS: Failed to initialize crtc\n");
  984. goto fail_free_planes;
  985. }
  986. dm->display_indexes_num = dm->dc->caps.max_streams;
  987. /* loops over all connectors on the board */
  988. for (i = 0; i < link_cnt; i++) {
  989. if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
  990. DRM_ERROR(
  991. "KMS: Cannot support more than %d display indexes\n",
  992. AMDGPU_DM_MAX_DISPLAY_INDEX);
  993. continue;
  994. }
  995. aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
  996. if (!aconnector)
  997. goto fail_free_planes;
  998. aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
  999. if (!aencoder) {
  1000. goto fail_free_connector;
  1001. }
  1002. if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
  1003. DRM_ERROR("KMS: Failed to initialize encoder\n");
  1004. goto fail_free_encoder;
  1005. }
  1006. if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
  1007. DRM_ERROR("KMS: Failed to initialize connector\n");
  1008. goto fail_free_encoder;
  1009. }
  1010. if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
  1011. amdgpu_dm_update_connector_after_detect(aconnector);
  1012. }
  1013. /* Software is initialized. Now we can register interrupt handlers. */
  1014. switch (adev->asic_type) {
  1015. case CHIP_BONAIRE:
  1016. case CHIP_HAWAII:
  1017. case CHIP_TONGA:
  1018. case CHIP_FIJI:
  1019. case CHIP_CARRIZO:
  1020. case CHIP_STONEY:
  1021. case CHIP_POLARIS11:
  1022. case CHIP_POLARIS10:
  1023. case CHIP_POLARIS12:
  1024. case CHIP_VEGA10:
  1025. if (dce110_register_irq_handlers(dm->adev)) {
  1026. DRM_ERROR("DM: Failed to initialize IRQ\n");
  1027. goto fail_free_encoder;
  1028. }
  1029. break;
  1030. #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
  1031. case CHIP_RAVEN:
  1032. if (dcn10_register_irq_handlers(dm->adev)) {
  1033. DRM_ERROR("DM: Failed to initialize IRQ\n");
  1034. goto fail_free_encoder;
  1035. }
  1036. break;
  1037. #endif
  1038. default:
  1039. DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
  1040. goto fail_free_encoder;
  1041. }
  1042. drm_mode_config_reset(dm->ddev);
  1043. return 0;
  1044. fail_free_encoder:
  1045. kfree(aencoder);
  1046. fail_free_connector:
  1047. kfree(aconnector);
  1048. fail_free_planes:
  1049. for (i = 0; i < dm->dc->caps.max_surfaces; i++)
  1050. kfree(mode_info->planes[i]);
  1051. return -1;
  1052. }
  1053. void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
  1054. {
  1055. drm_mode_config_cleanup(dm->ddev);
  1056. return;
  1057. }
/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
	/* Intentionally empty: kept so the amdgpu_display_funcs slot is
	 * non-NULL (it is called unconditionally by the base driver). */
}
/* Legacy backlight-set hook for amdgpu_display_funcs; currently a no-op
 * stub (DC handles backlight via the backlight class device instead). */
static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
		u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}
/* Legacy backlight-get hook for amdgpu_display_funcs; stubbed to 0 until
 * the encoder -> display_index translation is implemented. */
static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}
  1082. static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
  1083. struct drm_file *filp)
  1084. {
  1085. struct mod_freesync_params freesync_params;
  1086. uint8_t num_streams;
  1087. uint8_t i;
  1088. struct amdgpu_device *adev = dev->dev_private;
  1089. int r = 0;
  1090. /* Get freesync enable flag from DRM */
  1091. num_streams = dc_get_current_stream_count(adev->dm.dc);
  1092. for (i = 0; i < num_streams; i++) {
  1093. const struct dc_stream *stream;
  1094. stream = dc_get_stream_at_index(adev->dm.dc, i);
  1095. mod_freesync_update_state(adev->dm.freesync_module,
  1096. &stream, 1, &freesync_params);
  1097. }
  1098. return r;
  1099. }
/* amdgpu_display_funcs table for the DM path.  NULL entries are either
 * handled elsewhere (VBIOS parsing done by DAL) or unused by DM. */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};
  1117. #if defined(CONFIG_DEBUG_KERNEL_DC)
  1118. static ssize_t s3_debug_store(
  1119. struct device *device,
  1120. struct device_attribute *attr,
  1121. const char *buf,
  1122. size_t count)
  1123. {
  1124. int ret;
  1125. int s3_state;
  1126. struct pci_dev *pdev = to_pci_dev(device);
  1127. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  1128. struct amdgpu_device *adev = drm_dev->dev_private;
  1129. ret = kstrtoint(buf, 0, &s3_state);
  1130. if (ret == 0) {
  1131. if (s3_state) {
  1132. dm_resume(adev);
  1133. amdgpu_dm_display_resume(adev);
  1134. drm_kms_helper_hotplug_event(adev->ddev);
  1135. } else
  1136. dm_suspend(adev);
  1137. }
  1138. return ret == 0 ? count : 0;
  1139. }
  1140. DEVICE_ATTR_WO(s3_debug);
  1141. #endif
  1142. static int dm_early_init(void *handle)
  1143. {
  1144. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  1145. adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
  1146. amdgpu_dm_set_irq_funcs(adev);
  1147. switch (adev->asic_type) {
  1148. case CHIP_BONAIRE:
  1149. case CHIP_HAWAII:
  1150. adev->mode_info.num_crtc = 6;
  1151. adev->mode_info.num_hpd = 6;
  1152. adev->mode_info.num_dig = 6;
  1153. adev->mode_info.plane_type = dm_surfaces_type_default;
  1154. break;
  1155. case CHIP_FIJI:
  1156. case CHIP_TONGA:
  1157. adev->mode_info.num_crtc = 6;
  1158. adev->mode_info.num_hpd = 6;
  1159. adev->mode_info.num_dig = 7;
  1160. adev->mode_info.plane_type = dm_surfaces_type_default;
  1161. break;
  1162. case CHIP_CARRIZO:
  1163. adev->mode_info.num_crtc = 3;
  1164. adev->mode_info.num_hpd = 6;
  1165. adev->mode_info.num_dig = 9;
  1166. adev->mode_info.plane_type = dm_surfaces_type_carizzo;
  1167. break;
  1168. case CHIP_STONEY:
  1169. adev->mode_info.num_crtc = 2;
  1170. adev->mode_info.num_hpd = 6;
  1171. adev->mode_info.num_dig = 9;
  1172. adev->mode_info.plane_type = dm_surfaces_type_stoney;
  1173. break;
  1174. case CHIP_POLARIS11:
  1175. case CHIP_POLARIS12:
  1176. adev->mode_info.num_crtc = 5;
  1177. adev->mode_info.num_hpd = 5;
  1178. adev->mode_info.num_dig = 5;
  1179. adev->mode_info.plane_type = dm_surfaces_type_default;
  1180. break;
  1181. case CHIP_POLARIS10:
  1182. adev->mode_info.num_crtc = 6;
  1183. adev->mode_info.num_hpd = 6;
  1184. adev->mode_info.num_dig = 6;
  1185. adev->mode_info.plane_type = dm_surfaces_type_default;
  1186. break;
  1187. case CHIP_VEGA10:
  1188. adev->mode_info.num_crtc = 6;
  1189. adev->mode_info.num_hpd = 6;
  1190. adev->mode_info.num_dig = 6;
  1191. adev->mode_info.plane_type = dm_surfaces_type_default;
  1192. break;
  1193. #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
  1194. case CHIP_RAVEN:
  1195. adev->mode_info.num_crtc = 4;
  1196. adev->mode_info.num_hpd = 4;
  1197. adev->mode_info.num_dig = 4;
  1198. adev->mode_info.plane_type = dm_surfaces_type_default;
  1199. break;
  1200. #endif
  1201. default:
  1202. DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
  1203. return -EINVAL;
  1204. }
  1205. if (adev->mode_info.funcs == NULL)
  1206. adev->mode_info.funcs = &dm_display_funcs;
  1207. /* Note: Do NOT change adev->audio_endpt_rreg and
  1208. * adev->audio_endpt_wreg because they are initialised in
  1209. * amdgpu_device_init() */
  1210. #if defined(CONFIG_DEBUG_KERNEL_DC)
  1211. device_create_file(
  1212. adev->ddev->dev,
  1213. &dev_attr_s3_debug);
  1214. #endif
  1215. return 0;
  1216. }
/* Acquire the DAL lock for @dm.  Currently a stub that always reports
 * success; locking is not yet implemented. */
bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}
/* Release the DAL lock for @dm.  Stub counterpart of
 * amdgpu_dm_acquire_dal_lock(); always reports success. */
bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}