/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
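
/*
 * Fence callback for a queued flip: drop the fence reference and
 * reschedule the flip work so it can check the remaining fences.
 */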
static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}
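
/*
 * Consume the fence in @f and try to install amdgpu_flip_callback on it.
 * Returns true if the callback was installed (the flip must keep waiting),
 * false if the fence was NULL or already signaled.
 */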
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
				     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}
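
/*
 * Worker that performs the actual flip: waits for the exclusive and shared
 * fences, delays until we are past the vblank period before the one targeted
 * by the flip, then programs the flip via mmio and marks it submitted.
 */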
static void amdgpu_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned i;
	int vpos, hpos;

	if (amdgpu_flip_handle_fence(work, &work->excl))
		return;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
					&vpos, &hpos, NULL, NULL,
					&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, false);
	if (likely(r == 0)) {
		r = amdgpu_bo_unpin(work->old_abo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}
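
/*
 * Free everything a not-yet-submitted flip still owns: the reference on the
 * old buffer object, the exclusive and shared fences, and the work item
 * itself.
 */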
static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
{
	int i;

	amdgpu_bo_unref(&work->old_abo);
	dma_fence_put(work->excl);

	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);

	kfree(work->shared);
	kfree(work);
}
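
/*
 * Error-path helpers: unwind the reserve/pin steps of flip preparation in
 * reverse order before releasing the work item.
 */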
static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
					  struct amdgpu_bo *new_abo)
{
	amdgpu_bo_unreserve(new_abo);
	amdgpu_flip_work_cleanup(work);
}

static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
				      struct amdgpu_bo *new_abo)
{
	if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
		DRM_ERROR("failed to unpin new abo in error path\n");
	amdgpu_flip_cleanup_unreserve(work, new_abo);
}

void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
				  struct amdgpu_bo *new_abo)
{
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		amdgpu_flip_work_cleanup(work);
		return;
	}
	amdgpu_flip_cleanup_unpin(work, new_abo);
}
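
/*
 * Prepare a page flip: allocate the flip work, take a reference on the old
 * buffer, pin the new buffer into VRAM, collect its fences and compute the
 * target vblank. On success the work item and pinned BO are returned through
 * @work_p and @new_abo_p for amdgpu_crtc_submit_flip().
 */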
int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
			     struct drm_framebuffer *fb,
			     struct drm_pending_vblank_event *event,
			     uint32_t page_flip_flags,
			     uint32_t target,
			     struct amdgpu_flip_work **work_p,
			     struct amdgpu_bo **new_abo_p)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_framebuffer *old_amdgpu_fb;
	struct amdgpu_framebuffer *new_amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	u64 base;
	int r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
	obj = old_amdgpu_fb->obj;

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
	obj = new_amdgpu_fb->obj;
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to pin new abo buffer before flip\n");
		goto unreserve;
	}

	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
					      &work->shared_count,
					      &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	work->base = base;
	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	*work_p = work;
	*new_abo_p = new_abo;

	return 0;

pflip_cleanup:
	amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
	return r;

unpin:
	amdgpu_flip_cleanup_unpin(work, new_abo);
	return r;

unreserve:
	amdgpu_flip_cleanup_unreserve(work, new_abo);
	return r;

cleanup:
	amdgpu_flip_work_cleanup(work);
	return r;
}
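
/*
 * Second half of the flip: publish the prepared work on the CRTC, switch the
 * primary plane to the new framebuffer and kick the flip worker once.
 */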
void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
			     struct drm_framebuffer *fb,
			     struct amdgpu_flip_work *work,
			     struct amdgpu_bo *new_abo)
{
	unsigned long flags;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER(
		"crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
		amdgpu_crtc->crtc_id, amdgpu_crtc, work);

	amdgpu_flip_work_func(&work->flip_work.work);
}
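
/*
 * DRM page_flip_target hook: prepare the flip and, if that succeeds, submit
 * it immediately.
 */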
int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_pending_vblank_event *event,
				 uint32_t page_flip_flags,
				 uint32_t target,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct amdgpu_bo *new_abo;
	struct amdgpu_flip_work *work;
	int r;

	r = amdgpu_crtc_prepare_flip(crtc, fb, event, page_flip_flags,
				     target, &work, &new_abo);
	if (r)
		return r;

	amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);

	return 0;
}
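
/*
 * DRM set_config hook: perform the modeset with a runtime PM reference held,
 * and track whether any CRTC is still active so the display power reference
 * can be taken or dropped accordingly.
 */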
int amdgpu_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};
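
/*
 * Dump the connector, DDC, router and encoder configuration of the device
 * to the kernel log, mainly as a debugging aid.
 */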
void amdgpu_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("AMDGPU Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO(" %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO(" %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO(" DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO(" Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO(" CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO(" CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO(" LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO(" DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO(" DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO(" DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO(" DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO(" DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO(" DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO(" TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO(" CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
}

/**
 * amdgpu_ddc_probe - check for a connected sink via DDC
 *
 * Writes the DDC address on the connector's i2c (or DP aux) bus and reads
 * back the start of an EDID block to verify that a display is attached
 * and responding.
 */
bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
		      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux) {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	} else {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
	}

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;

	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_block_valid() can fix the last 2 bytes
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this connector */
		return false;
	}
	return true;
}

static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);

	drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
	drm_framebuffer_cleanup(fb);
	kfree(amdgpu_fb);
}

static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);

	return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = amdgpu_user_framebuffer_destroy,
	.create_handle = amdgpu_user_framebuffer_create_handle,
};
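
/*
 * Fill in an amdgpu_framebuffer from a mode_fb_cmd2 and its backing GEM
 * object, and register it with the DRM core. On failure the object pointer
 * is cleared and ownership of the object stays with the caller.
 */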
int
amdgpu_framebuffer_init(struct drm_device *dev,
			struct amdgpu_framebuffer *rfb,
			const struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *obj)
{
	int ret;

	rfb->obj = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret) {
		rfb->obj = NULL;
		return ret;
	}
	return 0;
}

static struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct amdgpu_framebuffer *amdgpu_fb;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &amdgpu_fb->base;
}

static void amdgpu_output_poll_changed(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_fb_output_poll_changed(adev);
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
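
/*
 * Create the driver-private KMS properties (coherent mode, load detection,
 * scaling, underscan borders, audio and dither) used by the display code.
 */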
int amdgpu_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev->ddev, 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev->ddev);

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev->ddev, 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev->ddev, 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev->ddev, 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	return 0;
}

void amdgpu_update_display_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}
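
/*
 * CRTC mode fixup: pick the scaler (RMX) type from the active encoder,
 * apply underscan borders for HDMI TVs, and precompute the vertical and
 * horizontal scaling ratios in fixed point.
 */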
bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);
		amdgpu_connector = to_amdgpu_connector(connector);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
		      is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when the query happened.
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 */
int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
			       unsigned int flags, int *vpos, int *hpos,
			       ktime_t *stime, ktime_t *etime,
			       const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}
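
/*
 * Map a CRTC index to the corresponding vblank interrupt source, or
 * AMDGPU_CRTC_IRQ_NONE if the index is out of range.
 */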
int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}