amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"
#include "vid.h"

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->target) {
			DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_target_get_vblank_counter(acrtc->target);
	}
}
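
/*
 * dm_crtc_get_scanoutpos
 *
 * @brief
 * Query DC for the vertical blank interval and current scanout position
 * of the given CRTC's dc_target.
 *
 * @return
 * Result of dc_target_get_scanoutpos() on success, 0 if the CRTC has no
 * dc_target, -EINVAL if the CRTC index is out of range.
 */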
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->target) {
			DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_target_get_scanoutpos(acrtc->target, vbl, position);
	}
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check was inherited from both callers of this
	 * function. Why an OTG instance of -1 can be passed in still needs
	 * to be investigated.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
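
/*
 * dm_pflip_high_irq - page flip completion interrupt handler
 *
 * Runs in interrupt context when DC signals that a submitted page flip has
 * completed: it clears the CRTC's pending flip state, sends the vblank
 * event to userspace and schedules the unpin work for the old buffer.
 */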
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_flip_work *works;
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			amdgpu_crtc->pflip_status,
			AMDGPU_FLIP_SUBMITTED,
			amdgpu_crtc->crtc_id,
			amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base,
					   works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, work: %p\n",
			__func__, amdgpu_crtc->crtc_id, amdgpu_crtc, works);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);
}
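
/*
 * dm_crtc_high_irq - vertical update (vblank) interrupt handler
 *
 * Maps the interrupt source back to a CRTC and forwards the event to the
 * DRM core via drm_handle_vblank().
 */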
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
		enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		enum amd_powergating_state state)
{
	return 0;
}
/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;
	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;
	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc)
		DRM_INFO("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = 128;
	adev->ddev->mode_config.cursor_height = 128;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}
void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);

	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	dc_destroy(&adev->dm.dc);
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				/* break instead of returning so the
				 * connection_mutex is released on the error
				 * path as well */
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;

	return detect_mst_link_for_all_connectors(dev);
}
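
/*
 * s3_handle_mst - suspend or resume the MST topology managers
 *
 * Called around S3: suspends or resumes the DP MST topology manager of
 * every MST root connector. Connectors created by the MST framework
 * itself (those with an mst_port) are skipped.
 */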
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {
			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);

	return 0;
}
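
/*
 * dm_suspend - put the display stack into a low power state
 *
 * Suspends the MST topology managers, disables vblanks on every CRTC that
 * has a dc_target, suspends DM interrupt handling and finally asks DC to
 * enter the D3 power state.
 */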
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;
	struct drm_crtc *crtc;

	s3_handle_mst(adev->ddev, true);

	/* flush all pending vblank events and turn interrupts off
	 * before disabling CRTCs. They will be enabled back in
	 * dm_display_resume
	 */
	drm_modeset_lock_all(adev->ddev);
	list_for_each_entry(crtc, &adev->ddev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		if (acrtc->target)
			drm_crtc_vblank_off(crtc);
	}
	drm_modeset_unlock_all(adev->ddev);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3,
		DC_VIDEO_POWER_SUSPEND);

	return ret;
}
struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(state, connector, conn_state, i) {
		crtc_from_state =
			from_state_var ?
				conn_state->crtc :
				connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}
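
/*
 * dm_display_resume - restore the display configuration after resume
 *
 * Builds a drm_atomic_state that re-attaches all connectors, all CRTCs
 * (with mode_changed forced so a full modeset is done) and all planes,
 * re-pins the front buffers of active planes, and commits that state.
 */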
static int dm_display_resume(struct drm_device *ddev)
{
	int ret = 0;
	struct drm_connector *connector;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct amdgpu_connector *aconnector;
	struct drm_connector_state *conn_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 * Should be done in the first place in order to make connectors
	 * available in state during crtc state processing. It is used for
	 * making decision if crtc should be disabled in case sink got
	 * disconnected.
	 *
	 * Connectors state crtc with NULL dc_sink should be cleared, because it
	 * will fail validation during commit
	 */
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		conn_state = drm_atomic_get_connector_state(state, connector);

		ret = PTR_ERR_OR_ZERO(conn_state);
		if (ret)
			goto err;
	}

	/* Attach crtcs to drm_atomic_state */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto err;

		/* force a restore */
		crtc_state->mode_changed = true;
	}

	/* Attach planes to drm_atomic_state */
	list_for_each_entry(plane, &ddev->mode_config.plane_list, head) {
		struct drm_crtc *crtc;
		struct drm_gem_object *obj;
		struct drm_framebuffer *fb;
		struct amdgpu_framebuffer *afb;
		struct amdgpu_bo *rbo;
		int r;
		struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane);

		ret = PTR_ERR_OR_ZERO(plane_state);
		if (ret)
			goto err;

		crtc = plane_state->crtc;
		fb = plane_state->fb;

		if (!crtc || !crtc->state || !crtc->state->active)
			continue;

		if (!fb) {
			DRM_DEBUG_KMS("No FB bound\n");
			return 0;
		}

		/*
		 * Pin back the front buffers, cursor buffer was already pinned
		 * back in amdgpu_resume_kms
		 */
		afb = to_amdgpu_framebuffer(fb);
		obj = afb->obj;
		rbo = gem_to_amdgpu_bo(obj);
		r = amdgpu_bo_reserve(rbo, false);
		if (unlikely(r != 0))
			return r;

		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);

		amdgpu_bo_unreserve(rbo);

		if (unlikely(r != 0)) {
			DRM_ERROR("Failed to pin framebuffer\n");
			return r;
		}
	}

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D0,
		DC_VIDEO_POWER_ON);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;
	struct drm_crtc *crtc;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	drm_modeset_lock_all(ddev);
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		if (acrtc->target)
			drm_crtc_vblank_on(crtc);
	}
	drm_modeset_unlock_all(ddev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
	}

	drm_modeset_lock_all(ddev);
	ret = dm_display_resume(ddev);
	drm_modeset_unlock_all(ddev);

	amdgpu_dm_irq_resume(adev);

	return ret;
}
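
/*
 * DM IP block callbacks, invoked by the amdgpu base driver through the
 * common amd_ip_funcs interface (init/fini, suspend/resume, idle and
 * reset handling, clock/power gating).
 */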
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/* TODO: it is temporarily non-const, should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Don't do it here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use dc_em_sink to fake the target,
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
						connector);
				/* retain and release below are used to bump up
				 * the refcount for the sink because the link
				 * doesn't point to it anymore after disconnect,
				 * so on the next crtc-to-connector reshuffle by
				 * UMD we would otherwise get an unwanted
				 * dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
					connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
					connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}
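
/*
 * handle_hpd_irq - HPD (hot plug detect) long pulse handler
 *
 * Re-runs link detection and, if the sink changed, updates the connector,
 * restores the DRM connector state and notifies userspace via a hotplug
 * event (unless the connector status is forced).
 */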
static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or
	 * notify the OS since (for the MST case) MST does this in its own
	 * context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}
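
/*
 * dm_handle_hpd_rx_irq - service MST sideband messages after a short pulse
 *
 * Reads the DPCD interrupt vector (DP_SINK_COUNT for DPCD < 1.2 sinks, the
 * ESI block at DP_SINK_COUNT_ESI otherwise), lets the DRM MST helper
 * process it, ACKs the handled bits back to the sink and re-reads until no
 * new IRQ is reported or max_process_count iterations are reached.
 */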
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);
#endif

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporarily hold a mutex so the HPD interrupt cannot cause a
	 * GPIO conflict; once the i2c helper is implemented, this mutex
	 * should be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
			(dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}
/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT;
			i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
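
/*
 * amdgpu_dm_mode_config_init - set up the DRM mode config for the device
 *
 * Reuses fb_create and output_poll_changed from the base amdgpu mode
 * funcs, sets the mode config limits and async page flip support, and
 * creates the amdgpu modeset properties.
 */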
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	amdgpu_dm_mode_funcs.fb_create =
		amdgpu_mode_funcs.fb_create;
	amdgpu_dm_mode_funcs.output_poll_changed =
		amdgpu_mode_funcs.output_poll_changed;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	/* backlight_device_register() returns an ERR_PTR, not NULL,
	 * on failure */
	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif
/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector;
	struct amdgpu_encoder *aencoder;
	struct amdgpu_crtc *acrtc;
	uint32_t link_cnt;

	link_cnt = dm->dc->caps.max_links;

	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_targets; i++) {
		acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
		if (!acrtc)
			goto fail;

		if (amdgpu_dm_crtc_init(dm, acrtc, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			kfree(acrtc);
			goto fail;
		}
	}

	dm->display_indexes_num = dm->dc->caps.max_targets;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
				AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail_free_connector;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_connector;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			return -1;
		}
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -1;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;

fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail:
	return -1;
}
void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
}
/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
		u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}
/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 * via DRM IOCTL, by user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	struct dc_target *target;
	struct dc_flip_addrs addr = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should be guarded by the dal_mutex but we can't do this since
	 * the caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might come,
	 * spin on the event_lock, disabling interrupts while it does so. At
	 * this point the core can no longer be pre-empted and return to the
	 * thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just take
	 * a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->target after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 */
	acrtc = adev->mode_info.crtcs[crtc_id];
	target = acrtc->target;

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be clean-up on reset.
	 */
	if (!target) {
		WARN_ON(1);
		return;
	}

	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	addr.flip_immediate = async;

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			__func__,
			addr.address.grph.addr.high_part,
			addr.address.grph.addr.low_part);

	dc_flip_surface_addrs(
			adev->dm.dc,
			dc_target_get_status(target)->surfaces,
			&addr, 1);
}
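
/*
 * amdgpu_notify_freesync - handler behind the notify_freesync display hook
 *
 * Walks all current DC targets and pushes freesync_params to the freesync
 * module for each target's streams. Note that freesync_params is not yet
 * populated from the DRM request here; filling it in is presumably still
 * TODO at this stage of the code.
 */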
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
		struct drm_file *filp)
{
	struct mod_freesync_params freesync_params;
	uint8_t num_targets;
	uint8_t i;
	struct dc_target *target;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */

	num_targets = dc_get_current_target_count(adev->dm.dc);

	for (i = 0; i < num_targets; i++) {
		target = dc_get_target_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
				target->streams,
				target->stream_count,
				&freesync_params);
	}

	return r;
}
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level, /* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level, /* called unconditionally */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};
#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
		struct device *device,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}