amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"
#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"
#include "raven1/DCN/dcn_1_0_offset.h"
#include "raven1/DCN/dcn_1_0_sh_mask.h"
#include "vega10/soc15ip.h"
#include "soc15_common.h"
#endif

static enum drm_plane_type dm_surfaces_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};

static enum drm_plane_type dm_surfaces_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV capable underlay */
};

static enum drm_plane_type dm_surfaces_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV capable underlay */
};

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->stream == NULL) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}
		return dc_stream_get_vblank_counter(acrtc->stream);
	}
}

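/*
 * dm_crtc_get_scanoutpos
 *
 * @brief
 * Query the current scanout position of the given CRTC. The values from
 * dc_stream_get_scanoutpos() are packed back into the legacy register
 * format expected by the base driver: the low 16 bits hold the vertical
 * component and the high 16 bits the horizontal one.
 */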
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->stream == NULL) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		/*
		 * TODO: rework the base driver to use the values directly;
		 * for now parse them back into the register format.
		 */
		dc_stream_get_scanoutpos(acrtc->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

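/*
 * get_crtc_by_otg_inst
 *
 * @brief
 * Look up the amdgpu_crtc whose OTG instance matches otg_inst by walking
 * the DRM CRTC list; returns NULL when no CRTC matches.
 */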
static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check was inherited from both callers of this
	 * function. It still needs to be investigated why otg_inst can
	 * be -1 at all.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

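/*
 * dm_pflip_high_irq
 *
 * @brief
 * Page-flip completion handler, called in interrupt context. Sends the
 * pending vblank event for the flipped CRTC, marks the flip as done and
 * drops the vblank reference taken when the flip was submitted.
 */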
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* The IRQ can occur during the initial stage. */
	/* TODO: work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wake up userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;
	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
}

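/*
 * dm_crtc_high_irq
 *
 * @brief
 * Vertical blank handler: resolves the CRTC from the OTG instance the
 * interrupt came from and forwards the event to drm_handle_vblank().
 */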
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc)
		DRM_INFO("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
			"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
			 adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual number of CRTCs in use */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO: use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}

void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
				 aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				/* break, not return, so connection_mutex is
				 * released before we leave */
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;

	return detect_mst_link_for_all_connectors(dev);
}

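/*
 * s3_handle_mst
 *
 * @brief
 * Suspend or resume the MST topology manager of every root MST connector
 * around S3; connectors that are themselves MST ports are skipped.
 */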
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {
			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3
		);

	return ret;
}

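/*
 * amdgpu_dm_find_first_crct_matching_connector
 *
 * @brief
 * Return the first connector bound to @crtc. Depending on
 * @from_state_var the CRTC is read from the connector states in the
 * atomic @state or from the connectors' current state.
 */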
struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(
		state,
		connector,
		conn_state,
		i) {
		crtc_from_state =
			from_state_var ?
				conn_state->crtc :
				connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D0
		);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			    &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force a mode set in the atomic commit */
	for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
		crtc_state->active_changed = true;

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

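/*
 * amdgpu_dm_update_connector_after_detect
 *
 * @brief
 * Synchronize the DRM connector with the dc_sink found during detection:
 * update the sink pointer and its refcount, the EDID property and the
 * freesync module registration for both connect and disconnect events.
 */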
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;

	/* An "EDID managed" connector gets its first update only in the
	 * mode_valid hook; the connector sink is then set to either the fake
	 * or the physical sink depending on link status. Don't do it here
	 * during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
	    && aconnector->dc_em_sink) {

		/* For headless S3 resume use em_sink to fake the stream,
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
					connector);
				/* The retain and release below bump up the
				 * sink's refcount: the link no longer points
				 * to it after disconnect, so on the next
				 * crtc-to-connector reshuffle by UMD we would
				 * otherwise get an unwanted dc_sink release.
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
				connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
			 aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
				connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
								aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (for the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

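/*
 * dm_handle_hpd_rx_irq
 *
 * @brief
 * Service an MST short-pulse interrupt: read the sink's ESI (or legacy
 * sink-count/IRQ-vector) DPCD registers, let the MST topology manager
 * handle the IRQ, acknowledge it by writing the ESI bytes back, and
 * repeat until no new IRQ is reported or max_process_count is reached.
 */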
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporarily take a mutex so the HPD interrupt does not hit a
	 * GPIO conflict; once an i2c helper is implemented, this mutex should
	 * be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			    &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with the base driver.
	 *    The base driver will call set() to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    The base driver will call amdgpu_dm_irq_handler() for ALL
	 *    interrupts coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with the base driver.
	 *    The base driver will call set() to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    The base driver will call amdgpu_dm_irq_handler() for ALL
	 *    interrupts coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
	     i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
	     i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

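/*
 * amdgpu_dm_mode_config_init
 *
 * @brief
 * Set up the DRM mode_config for the display manager: size limits,
 * preferred depth, async page-flip support and the DM mode config hooks.
 */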
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (dm->backlight_dev == NULL)
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}
#endif

/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	unsigned long possible_crtcs;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_surfaces; i++) {
		mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
					       GFP_KERNEL);
		if (!mode_info->planes[i]) {
			DRM_ERROR("KMS: Failed to allocate surface\n");
			goto fail_free_planes;
		}
		mode_info->planes[i]->base.type = mode_info->plane_type[i];

		/*
		 * HACK: IGT tests expect that each plane can only have one
		 * possible CRTC. For now, set one CRTC for each
		 * plane that is not an underlay, but still allow multiple
		 * CRTCs for underlay planes.
		 */
		possible_crtcs = 1 << i;
		if (i >= dm->dc->caps.max_streams)
			possible_crtcs = 0xff;

		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
			DRM_ERROR("KMS: Failed to initialize plane\n");
			goto fail_free_planes;
		}
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail_free_planes;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
				AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail_free_planes;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail_free_connector;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_encoder;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail_free_encoder;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail_free_encoder;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail_free_encoder;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;
fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail_free_planes:
	for (i = 0; i < dm->dc->caps.max_surfaces; i++)
		kfree(mode_info->planes[i]);
	return -1;
}

void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				   u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}

static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
	struct mod_freesync_params freesync_params;
	uint8_t num_streams;
	uint8_t i;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */

	num_streams = dc_get_current_stream_count(adev->dm.dc);

	for (i = 0; i < num_streams; i++) {
		const struct dc_stream *stream;
		stream = dc_get_stream_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  &stream, 1, &freesync_params);
	}

	return r;
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level, /* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level, /* called unconditionally */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
	struct device *device,
	struct device_attribute *attr,
	const char *buf,
	size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

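/*
 * dm_early_init
 *
 * @brief
 * Early IP init: marks the driver atomic, installs the DM IRQ funcs and
 * sets the per-ASIC number of CRTCs, HPD pins and DIGs along with the
 * plane type table.
 */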
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_surfaces_type_carizzo;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_surfaces_type_stoney;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_VEGA10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}