amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"
#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"

static enum drm_plane_type dm_surfaces_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};

static enum drm_plane_type dm_surfaces_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV capable underlay */
};

static enum drm_plane_type dm_surfaces_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV capable underlay */
};

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->stream) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->stream);
	}
}

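/*
 * dm_crtc_get_scanoutpos - query DC for the vblank and scanout position of
 * the given CRTC. Returns -EINVAL for an out-of-range CRTC index, 0 when
 * no stream is attached, otherwise whatever dc_stream_get_scanoutpos()
 * reports.
 */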
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (NULL == acrtc->stream) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_stream_get_scanoutpos(acrtc->stream, vbl, position);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

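/*
 * get_crtc_by_otg_inst - walk the DRM CRTC list and return the amdgpu_crtc
 * whose OTG (output timing generator) instance matches otg_inst, or NULL
 * if no CRTC matches.
 */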
static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check is inherited from both functions where this
	 * one is used now. It still needs to be investigated why this case
	 * can happen at all.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

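/*
 * dm_pflip_high_irq - page-flip interrupt handler. Sends the pending
 * DRM_EVENT_FLIP_COMPLETE vblank event to userspace, clears the CRTC's
 * pending-flip state and drops the vblank reference taken when the flip
 * was submitted.
 */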
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wake up userspace */
	if (amdgpu_crtc->event
	    && amdgpu_crtc->event->event.base.type
	       == DRM_EVENT_FLIP_COMPLETE) {
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;
	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
}

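/*
 * dm_crtc_high_irq - vblank interrupt handler. Maps the OTG instance back
 * to its CRTC and forwards the event to the DRM vblank machinery.
 */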
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work,
			struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;
	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc)
		DRM_INFO("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}

void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);

	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	{
		dc_destroy(&adev->dm.dc);
	}
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

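/*
 * detect_mst_link_for_all_connectors - start MST topology management on
 * every connector whose DC link reports an MST branch device. Called from
 * late init, once the DRM device is fully registered.
 */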
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				/* break instead of returning here so the
				 * connection_mutex is not left locked */
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
	int r = detect_mst_link_for_all_connectors(dev);

	return r;
}

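/*
 * s3_handle_mst - suspend or resume the MST topology manager of every MST
 * root connector (connectors that are not satellites of an mst_port)
 * around an S3 cycle.
 */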
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {
			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);

	return 0;
}

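/*
 * dm_suspend - S3 entry. Suspends the MST topology managers, masks DM
 * interrupts, caches the current atomic state for restore on resume and
 * puts the display hardware into ACPI power state D3.
 */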
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3
		);

	return ret;
}

struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(
		state,
		connector,
		conn_state,
		i) {
		crtc_from_state =
			from_state_var ?
				conn_state->crtc :
				connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}

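/*
 * dm_resume - S3 exit, hardware half. Only brings the display hardware
 * back to ACPI power state D0; the software-state restore is done in
 * amdgpu_dm_display_resume() below.
 */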
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D0
		);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Force mode set in atomic commit */
	for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
		crtc_state->active_changed = true;

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/* TODO: it is temporarily non-const; it should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

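/*
 * amdgpu_dm_update_connector_after_detect - sync the DRM connector with
 * the result of a DC link detection: update the cached dc_sink and EDID,
 * register/unregister the sink with the freesync module and refresh the
 * connector's EDID property. MST connectors are handled by the DRM MST
 * framework and are skipped here.
 */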
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and
	 * then the connector sink is set to either fake or physical sink
	 * depending on link status. Don't do it here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use em_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
						connector);
				/* retain and release below are used for
				 * bumping up the refcount of the sink because
				 * the link doesn't point to it anymore after
				 * disconnect, so on the next crtc-to-connector
				 * reshuffle by UMD we would get into an
				 * unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
					connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

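/*
 * handle_hpd_irq - long-pulse hotplug handler. Re-runs link detection
 * and, on a status change, restores the connector state and notifies
 * userspace via a hotplug event.
 */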
static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or
	 * notify the OS since (for the MST case) MST does this in its own
	 * context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

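/*
 * dm_handle_hpd_rx_irq - service a DP short pulse for an MST link. Reads
 * the sink's ESI (or legacy DPCD 0x200) registers, lets the DRM MST
 * manager process the IRQ, acks it back to the sink and repeats until no
 * new IRQ is pending or max_process_count is reached.
 */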
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}

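/*
 * handle_hpd_rx_irq - DP short-pulse handler. Lets DC process the RX IRQ
 * first; if the downstream port status changed on a non-MST link a full
 * re-detect is done, while MST links are forwarded to
 * dm_handle_hpd_rx_irq().
 */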
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporarily take a mutex so the HPD interrupt does not run
	 * into a GPIO conflict; once an i2c helper is implemented, this
	 * mutex should be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

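/*
 * register_hpd_handlers - hook handle_hpd_irq() and handle_hpd_rx_irq()
 * up to the HPD and HPD-RX interrupt sources of every connector's DC link.
 */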
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	/* backlight_device_register() returns an ERR_PTR, not NULL, on
	 * failure, so check with IS_ERR() */
	if (IS_ERR(dm->backlight_dev)) {
		DRM_ERROR("DM: Backlight registration failed!\n");
		dm->backlight_dev = NULL;
	} else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;

	link_cnt = dm->dc->caps.max_links;

	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_surfaces; i++) {
		mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
					       GFP_KERNEL);
		if (!mode_info->planes[i]) {
			DRM_ERROR("KMS: Failed to allocate surface\n");
			goto fail_free_planes;
		}
		mode_info->planes[i]->plane_type = mode_info->plane_type[i];
		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], 1)) {
			DRM_ERROR("KMS: Failed to initialize plane\n");
			goto fail_free_planes;
		}
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail_free_planes;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail_free_planes;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail_free_connector;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_encoder;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail_free_encoder;
		}
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail_free_encoder;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;
fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail_free_planes:
	for (i = 0; i < dm->dc->caps.max_surfaces; i++)
		kfree(mode_info->planes[i]);
	return -1;
}

void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				   u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}

/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 *		  via DRM IOCTL, by user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	const struct dc_stream *stream;
	struct dc_flip_addrs addr = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should be guarded by the dal_mutex but we can't do this
	 * since the caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might
	 * come, spin on the event_lock, disabling interrupts while it does
	 * so. At this point the core can no longer be preempted and return
	 * to the thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just
	 * take a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->stream after we read and check its
	 * value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal
	 * with any potential concurrency issues arising from interrupt
	 * calls.
	 */
	acrtc = adev->mode_info.crtcs[crtc_id];
	stream = acrtc->stream;

	if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
		/* In commit tail framework this cannot happen */
		BUG_ON(0);
	}

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be clean-up on reset.
	 */
	if (!stream) {
		WARN_ON(1);
		return;
	}

	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	addr.flip_immediate = async;

	if (acrtc->base.state->event &&
	    acrtc->base.state->event->event.base.type ==
			    DRM_EVENT_FLIP_COMPLETE) {
		acrtc->event = acrtc->base.state->event;

		/* Set the flip status */
		acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

		/* Mark this event as consumed */
		acrtc->base.state->event = NULL;
	}

	dc_flip_surface_addrs(adev->dm.dc,
			      dc_stream_get_status(stream)->surfaces,
			      &addr, 1);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);
}

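/*
 * amdgpu_notify_freesync - DRM ioctl entry point that forwards a freesync
 * parameter update to the freesync module for each active DC stream. The
 * parameters are not yet read from the DRM request (see the TODO-style
 * comment in the body).
 */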
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
	/* zero-initialized so no stack garbage reaches the freesync module */
	struct mod_freesync_params freesync_params = {0};
	uint8_t num_streams;
	uint8_t i;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */
	num_streams = dc_get_current_stream_count(adev->dm.dc);

	for (i = 0; i < num_streams; i++) {
		const struct dc_stream *stream;

		stream = dc_get_stream_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  &stream, 1, &freesync_params);
	}

	return r;
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
	struct device *device,
	struct device_attribute *attr,
	const char *buf,
	size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

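/*
 * dm_early_init - per-ASIC setup. Fills in the CRTC/HPD/DIG counts and
 * the plane-type table for the detected chip and installs the DM display
 * and IRQ function tables.
 */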
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_surfaces_type_carizzo;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_surfaces_type_stoney;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_VEGA10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}