amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"
#include "vid.h"
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"

/* Debug facilities */
#define AMDGPU_DM_NOT_IMPL(fmt, ...) \
	DRM_INFO("DM_NOT_IMPL: " fmt, ##__VA_ARGS__)

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->target) {
			DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_target_get_vblank_counter(acrtc->target);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->target) {
			DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_target_get_scanoutpos(acrtc->target, vbl, position);
	}
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

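/*
 * Find the amdgpu_crtc whose OTG (output timing generator) instance
 * matches otg_inst; falls back to CRTC 0 with a warning for the
 * (unexplained) otg_inst == -1 case.
 */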
static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check is inherited from both functions where this
	 * one is used now. It still needs to be investigated why this case
	 * could happen.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

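/*
 * Page-flip completion interrupt handler: map the interrupt source back
 * to its CRTC, verify a flip was actually submitted, send the pending
 * vblank event to userspace and schedule the unpin work.
 */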
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_flip_work *works;
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* An IRQ could occur during the initial stage. */
	/* TODO: work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			amdgpu_crtc->pflip_status,
			AMDGPU_FLIP_SUBMITTED,
			amdgpu_crtc->crtc_id,
			amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base,
					   works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, work: %p\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc, works);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);
}

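/*
 * Vertical update (VUPDATE) interrupt handler: resolve the CRTC for the
 * interrupt source and forward the event to the DRM vblank machinery.
 */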
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc)
		DRM_INFO("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
			"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO: use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = 128;
	adev->ddev->mode_config.cursor_height = 128;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}

void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);

	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	{
		dc_destroy(&adev->dm.dc);
	}

	return;
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

static void detect_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);

			if (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

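/*
 * Suspend or resume the DP MST topology managers of all root MST
 * connectors (those without an mst_port) around S3 transitions.
 */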
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {
			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);
	detect_link_for_all_connectors(adev->ddev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);

	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;
	struct drm_crtc *crtc;

	s3_handle_mst(adev->ddev, true);

	/* flush all pending vblank events and turn interrupts off
	 * before disabling CRTCs. They will be enabled back in
	 * dm_display_resume
	 */
	drm_modeset_lock_all(adev->ddev);
	list_for_each_entry(crtc, &adev->ddev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (acrtc->target)
			drm_crtc_vblank_off(crtc);
	}
	drm_modeset_unlock_all(adev->ddev);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3,
		DC_VIDEO_POWER_SUSPEND);

	return ret;
}

struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(
		state,
		connector,
		conn_state,
		i) {
		crtc_from_state =
			from_state_var ?
				conn_state->crtc :
				connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}

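/*
 * Build and commit an atomic state that restores the pre-suspend
 * display configuration: add every connector, every CRTC (with
 * mode_changed forced to trigger a full modeset) and every plane,
 * re-pinning the framebuffers of active planes along the way.
 */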
static int dm_display_resume(struct drm_device *ddev)
{
	int ret = 0;
	struct drm_connector *connector;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct amdgpu_connector *aconnector;
	struct drm_connector_state *conn_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 * Should be done in the first place in order to make connectors
	 * available in state during crtc state processing. It is used for
	 * making decision if crtc should be disabled in case sink got
	 * disconnected.
	 *
	 * Connectors state crtc with NULL dc_sink should be cleared, because it
	 * will fail validation during commit
	 */
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		conn_state = drm_atomic_get_connector_state(state, connector);

		ret = PTR_ERR_OR_ZERO(conn_state);
		if (ret)
			goto err;
	}

	/* Attach crtcs to drm_atomic_state */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto err;

		/* force a restore */
		crtc_state->mode_changed = true;
	}

	/* Attach planes to drm_atomic_state */
	list_for_each_entry(plane, &ddev->mode_config.plane_list, head) {

		struct drm_crtc *crtc;
		struct drm_gem_object *obj;
		struct drm_framebuffer *fb;
		struct amdgpu_framebuffer *afb;
		struct amdgpu_bo *rbo;
		int r;
		struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane);

		ret = PTR_ERR_OR_ZERO(plane_state);
		if (ret)
			goto err;

		crtc = plane_state->crtc;
		fb = plane_state->fb;

		if (!crtc || !crtc->state || !crtc->state->active)
			continue;

		if (!fb) {
			DRM_DEBUG_KMS("No FB bound\n");
			return 0;
		}

		/*
		 * Pin back the front buffers, cursor buffer was already pinned
		 * back in amdgpu_resume_kms
		 */
		afb = to_amdgpu_framebuffer(fb);
		obj = afb->obj;
		rbo = gem_to_amdgpu_bo(obj);
		r = amdgpu_bo_reserve(rbo, false);
		if (unlikely(r != 0))
			return r;

		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);

		amdgpu_bo_unreserve(rbo);

		if (unlikely(r != 0)) {
			DRM_ERROR("Failed to pin framebuffer\n");
			return r;
		}
	}

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D0,
		DC_VIDEO_POWER_ON);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;
	struct drm_crtc *crtc;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	drm_modeset_lock_all(ddev);
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (acrtc->target)
			drm_crtc_vblank_on(crtc);
	}
	drm_modeset_unlock_all(ddev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
		 */
		if (aconnector->mst_port)
			continue;

		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
	}

	drm_modeset_lock_all(ddev);
	ret = dm_display_resume(ddev);
	drm_modeset_unlock_all(ddev);

	amdgpu_dm_irq_resume(adev);
	return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = NULL,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/* TODO: this is temporarily non-const; it should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit
};

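/*
 * Bring the DRM connector in sync with the outcome of DC link
 * detection: update dc_sink, the EDID property and the freesync module
 * registration, with special handling for forced/emulated-EDID
 * connectors and MST sinks.
 */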
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Don't do it here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use the em_sink to fake a target
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink)
				amdgpu_dm_remove_sink_from_freesync_module(
								connector);
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

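/*
 * HPD long-pulse handler: re-run link detection and, if the link
 * changed, update the connector, restore its DRM state and notify
 * userspace (unless the connector status is forced).
 */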
static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or
	 * notify the OS since (for the MST case) MST does this in its own
	 * context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

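/*
 * Drain MST sideband events: read the DPCD ESI bytes (or the legacy
 * DP_SINK_COUNT range for DPCD < 1.2), hand them to the MST manager,
 * ACK the handled bits and loop until no new IRQ is reported, bounded
 * by max_process_count.
 */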
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);
#endif

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}

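/*
 * HPD short-pulse (hpd_rx) handler: let DC handle the link interrupt,
 * re-detect non-MST links whose downstream port status changed, and
 * forward MST sideband traffic to dm_handle_hpd_rx_irq().
 */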
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporarily add a mutex to protect the HPD interrupt from a
	 * GPIO conflict; after the i2c helper is implemented, this mutex
	 * should be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

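/*
 * Register handle_hpd_irq() and handle_hpd_rx_irq() for every connector
 * whose DC link exposes valid hpd/hpd_rx interrupt sources.
 */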
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT;
	     i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

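/*
 * Initialize DRM mode_config: borrow fb_create and output_poll_changed
 * from the base amdgpu mode funcs, then set size limits, preferred
 * depth and async page-flip support.
 */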
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	amdgpu_dm_mode_funcs.fb_create =
		amdgpu_mode_funcs.fb_create;
	amdgpu_dm_mode_funcs.output_poll_changed =
		amdgpu_mode_funcs.output_poll_changed;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (NULL == dm->backlight_dev)
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector;
	struct amdgpu_encoder *aencoder;
	struct amdgpu_crtc *acrtc;
	uint32_t link_cnt;

	link_cnt = dm->dc->caps.max_links;

	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_targets; i++) {
		acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
		if (!acrtc)
			goto fail;

		if (amdgpu_dm_crtc_init(
			dm,
			acrtc,
			i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			kfree(acrtc);
			goto fail;
		}
	}

	dm->display_indexes_num = dm->dc->caps.max_targets;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail_free_connector;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_connector;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			return -1;
		}
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -1;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;
fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail:
	return -1;
}

void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	AMDGPU_DM_NOT_IMPL("%s\n", __func__);
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				   u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	AMDGPU_DM_NOT_IMPL("%s\n", __func__);
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	AMDGPU_DM_NOT_IMPL("%s\n", __func__);
	return 0;
}

/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 *		  via DRM IOCTL, by user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: if true, request an immediate (flip_immediate) flip
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	struct dc_target *target;
	struct dc_flip_addrs addr = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should be guarded by the dal_mutex but we can't do this since
	 * the caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might come,
	 * spin on the event_lock, disabling interrupts while it does so. At
	 * this point the core can no longer be pre-empted and return to the
	 * thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just take
	 * a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->target after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 */

	acrtc = adev->mode_info.crtcs[crtc_id];
	target = acrtc->target;

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be clean-up on reset.
	 */
	if (!target) {
		WARN_ON(1);
		return;
	}

	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	addr.flip_immediate = async;

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);

	dc_flip_surface_addrs(
			adev->dm.dc,
			dc_target_get_status(target)->surfaces,
			&addr, 1);
}

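/*
 * Backend for the notify_freesync display hook: push freesync state to
 * the streams of every active DC target via the freesync module. Note
 * that filling freesync_params from the ioctl data still appears to be
 * TODO here.
 */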
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
	struct mod_freesync_params freesync_params;
	uint8_t num_targets;
	uint8_t i;
	struct dc_target *target;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */

	num_targets = dc_get_current_target_count(adev->dm.dc);

	for (i = 0; i < num_targets; i++) {
		target = dc_get_target_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  target->streams,
					  target->stream_count,
					  &freesync_params);
	}

	return r;
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
	struct device *device,
	struct device_attribute *attr,
	const char *buf,
	size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

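/*
 * Early init: install the DM IRQ functions and the per-ASIC number of
 * CRTCs, HPD pins and DIG encoders, then hook up dm_display_funcs and
 * (in debug builds) the s3_debug sysfs attribute.
 */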
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}