vmwgfx_scrn.c

/**************************************************************************
 *
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#define vmw_crtc_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.connector)

/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference between source clip rects and framebuffer coordinates.
 * @dst_y: Difference between source clip rects and framebuffer coordinates.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
	struct vmw_kms_dirty base;
	s32 left, right, top, bottom;
	s32 dst_x, dst_y;
	u32 sid;
};

/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
	uint32 header;
	SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_dmabuf_blit {
	uint32 header;
	SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
	SVGA3dCmdHeader header;
	SVGA3dCmdBlitSurfaceToScreen body;
};

/**
 * Display unit using screen objects.
 */
struct vmw_screen_object_unit {
	struct vmw_display_unit base;

	unsigned long buffer_size; /**< Size of allocated buffer */
	struct vmw_dma_buffer *buffer; /**< Backing store buffer */

	bool defined;
};

static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
	vmw_du_cleanup(&sou->base);
	kfree(sou);
}

/*
 * Screen Object Display Unit CRTC functions
 */

static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

/**
 * Send the fifo command to create a screen.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
			       struct vmw_screen_object_unit *sou,
			       uint32_t x, uint32_t y,
			       struct drm_display_mode *mode)
{
	size_t fifo_size;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAScreenObject obj;
	} *cmd;

	BUG_ON(!sou->buffer);

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* The hardware has hung, nothing we can do about it here. */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
	cmd->obj.structSize = sizeof(SVGAScreenObject);
	cmd->obj.id = sou->base.unit;
	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
	cmd->obj.size.width = mode->hdisplay;
	cmd->obj.size.height = mode->vdisplay;
	if (sou->base.is_implicit) {
		cmd->obj.root.x = x;
		cmd->obj.root.y = y;
	} else {
		cmd->obj.root.x = sou->base.gui_x;
		cmd->obj.root.y = sou->base.gui_y;
	}
	sou->base.set_gui_x = cmd->obj.root.x;
	sou->base.set_gui_y = cmd->obj.root.y;

	/* Ok to assume that buffer is pinned in vram */
	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
	cmd->obj.backingStore.pitch = mode->hdisplay * 4;

	vmw_fifo_commit(dev_priv, fifo_size);

	sou->defined = true;

	return 0;
}

/**
 * Send the fifo command to destroy a screen.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
				struct vmw_screen_object_unit *sou)
{
	size_t fifo_size;
	int ret;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAFifoCmdDestroyScreen body;
	} *cmd;

	/* no need to do anything */
	if (unlikely(!sou->defined))
		return 0;

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* the hardware has hung, nothing we can do about it here */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
	cmd->body.screenId = sou->base.unit;

	vmw_fifo_commit(dev_priv, fifo_size);

	/* Force sync */
	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
	if (unlikely(ret != 0))
		DRM_ERROR("Failed to sync with HW\n");
	else
		sou->defined = false;

	return ret;
}

/**
 * vmw_sou_crtc_mode_set_nofb - Create new screen
 *
 * @crtc: CRTC associated with the new screen
 *
 * This function creates/destroys a screen. This function cannot fail, so if
 * somehow we run into a failure, just do the best we can to get out.
 */
static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_plane_state *ps;
	struct vmw_plane_state *vps;
	int ret;

	sou = vmw_crtc_to_sou(crtc);
	dev_priv = vmw_priv(crtc->dev);
	ps = crtc->primary->state;
	fb = ps->fb;
	vps = vmw_plane_state_to_vps(ps);

	vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;

	if (sou->defined) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		if (ret) {
			DRM_ERROR("Failed to destroy Screen Object\n");
			return;
		}
	}

	if (vfb) {
		sou->buffer = vps->dmabuf;
		sou->buffer_size = vps->dmabuf_size;
		ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
					  &crtc->mode);
		if (ret)
			DRM_ERROR("Failed to define Screen Object %dx%d\n",
				  crtc->x, crtc->y);

		vmw_kms_add_active(dev_priv, &sou->base, vfb);
	} else {
		sou->buffer = NULL;
		sou->buffer_size = 0;

		vmw_kms_del_active(dev_priv, &sou->base);
	}
}

/**
 * vmw_sou_crtc_helper_prepare - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * Prepares the CRTC for a mode set, but we don't need to do anything here.
 */
static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
{
}

/**
 * vmw_sou_crtc_helper_commit - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * This is called after a mode set has been completed.
 */
static void vmw_sou_crtc_helper_commit(struct drm_crtc *crtc)
{
}

/**
 * vmw_sou_crtc_helper_disable - Turns off CRTC
 *
 * @crtc: CRTC to be turned off
 */
static void vmw_sou_crtc_helper_disable(struct drm_crtc *crtc)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	int ret;

	if (!crtc) {
		DRM_ERROR("CRTC is NULL\n");
		return;
	}

	sou = vmw_crtc_to_sou(crtc);
	dev_priv = vmw_priv(crtc->dev);

	if (sou->defined) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		if (ret)
			DRM_ERROR("Failed to destroy Screen Object\n");
	}
}

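/**
 * vmw_sou_crtc_page_flip - Flip the CRTC to a new framebuffer
 *
 * @crtc: CRTC to flip.
 * @new_fb: Framebuffer to flip to.
 * @event: Optional pending vblank event to queue on the flip fence.
 * @flags: Page flip flags; DRM_MODE_PAGE_FLIP_ASYNC is masked off.
 * @ctx: Modeset acquire context.
 *
 * Performs the atomic page flip and then issues a full-screen dirty
 * update so the device presents the new framebuffer contents.
 *
 * Returns 0 on success, negative error code on failure.
 */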
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
				  struct drm_framebuffer *new_fb,
				  struct drm_pending_vblank_event *event,
				  uint32_t flags,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
	struct vmw_fence_obj *fence = NULL;
	struct drm_vmw_rect vclips;
	int ret;

	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
		return -EINVAL;

	flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
	ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags, ctx);
	if (ret) {
		DRM_ERROR("Page flip error %d.\n", ret);
		return ret;
	}

	/* do a full screen dirty update */
	vclips.x = crtc->x;
	vclips.y = crtc->y;
	vclips.w = crtc->mode.hdisplay;
	vclips.h = crtc->mode.vdisplay;

	if (vfb->dmabuf)
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
						  NULL, &vclips, 1, 1,
						  true, &fence);
	else
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
						   NULL, &vclips, NULL,
						   0, 0, 1, 1, &fence);

	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	if (event) {
		struct drm_file *file_priv = event->base.file_priv;

		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   true);
	}

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_kms_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	drm_atomic_set_fb_for_plane(crtc->primary->state, old_fb);
	return ret;
}

static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_sou_crtc_destroy,
	.reset = vmw_du_crtc_reset,
	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
	.atomic_destroy_state = vmw_du_crtc_destroy_state,
	.set_config = vmw_kms_set_config,
	.page_flip = vmw_sou_crtc_page_flip,
};

/*
 * Screen Object Display Unit encoder functions
 */

static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
	.destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */

static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
	vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static const struct drm_connector_funcs vmw_sou_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_sou_connector_destroy,
	.reset = vmw_du_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = vmw_du_connector_atomic_set_property,
	.atomic_get_property = vmw_du_connector_atomic_get_property,
};

static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
	.best_encoder = drm_atomic_helper_best_encoder,
};

/*
 * Screen Object Display Plane Functions
 */

/**
 * vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the display surface and releases the reference to the backing
 * buffer.
 */
static void
vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
				 struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_dmabuf_unreference(&vps->dmabuf);
	vps->dmabuf_size = 0;

	vmw_du_plane_cleanup_fb(plane, old_state);
}

/**
 * vmw_sou_primary_plane_prepare_fb - allocate backing buffer
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * The SOU backing buffer is our equivalent of the display plane.
 *
 * Returns 0 on success
 */
static int
vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_private *dev_priv;
	size_t size;
	int ret;

	if (!new_fb) {
		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;

		return 0;
	}

	size = new_state->crtc_w * new_state->crtc_h * 4;

	if (vps->dmabuf) {
		if (vps->dmabuf_size == size)
			return 0;

		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;
	}

	vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
	if (!vps->dmabuf)
		return -ENOMEM;

	dev_priv = vmw_priv(crtc->dev);
	vmw_svga_enable(dev_priv);

	/*
	 * After we have allocated the backing store we might not be able to
	 * resume the overlays; this is preferred to failing the allocation.
	 */
	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);

	if (ret != 0)
		vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
	else
		vps->dmabuf_size = size;

	return ret;
}

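/**
 * vmw_sou_primary_plane_atomic_update - Update the legacy fb pointer
 *
 * @plane: display plane
 * @old_state: Previous plane state
 *
 * Keeps crtc->primary->fb in sync with the framebuffer of the new plane
 * state; the screen itself is (re)defined in vmw_sou_crtc_mode_set_nofb().
 */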
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc;

	if (crtc)
		crtc->primary->fb = plane->state->fb;
}

static const struct drm_plane_funcs vmw_sou_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vmw_du_primary_plane_destroy,
	.reset = vmw_du_plane_reset,
	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
	.atomic_destroy_state = vmw_du_plane_destroy_state,
};

static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vmw_du_cursor_plane_destroy,
	.reset = vmw_du_plane_reset,
	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
	.atomic_destroy_state = vmw_du_plane_destroy_state,
};

/*
 * Atomic Helpers
 */
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
	.atomic_check = vmw_du_cursor_plane_atomic_check,
	.atomic_update = vmw_du_cursor_plane_atomic_update,
	.prepare_fb = vmw_du_cursor_plane_prepare_fb,
	.cleanup_fb = vmw_du_plane_cleanup_fb,
};

static const struct
drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
	.atomic_check = vmw_du_primary_plane_atomic_check,
	.atomic_update = vmw_sou_primary_plane_atomic_update,
	.prepare_fb = vmw_sou_primary_plane_prepare_fb,
	.cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
};

static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
	.prepare = vmw_sou_crtc_helper_prepare,
	.commit = vmw_sou_crtc_helper_commit,
	.disable = vmw_sou_crtc_helper_disable,
	.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
	.atomic_check = vmw_du_crtc_atomic_check,
	.atomic_begin = vmw_du_crtc_atomic_begin,
	.atomic_flush = vmw_du_crtc_atomic_flush,
};

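/**
 * vmw_sou_init - Initialize a single screen object display unit
 *
 * @dev_priv: Pointer to the device private structure.
 * @unit: Index of the display unit to initialize.
 *
 * Allocates a struct vmw_screen_object_unit and sets up its primary and
 * cursor planes, connector, encoder and CRTC, attaching the relevant
 * connector properties.
 *
 * Returns 0 on success, negative error code on failure.
 */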
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_screen_object_unit *sou;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_plane *primary, *cursor;
	struct drm_crtc *crtc;
	int ret;

	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
	if (!sou)
		return -ENOMEM;

	sou->base.unit = unit;
	crtc = &sou->base.crtc;
	encoder = &sou->base.encoder;
	connector = &sou->base.connector;
	primary = &sou->base.primary;
	cursor = &sou->base.cursor;

	sou->base.active_implicit = false;
	sou->base.pref_active = (unit == 0);
	sou->base.pref_width = dev_priv->initial_width;
	sou->base.pref_height = dev_priv->initial_height;
	sou->base.pref_mode = NULL;

	/*
	 * Remove this after enabling atomic because property values can
	 * only exist in a state object
	 */
	sou->base.is_implicit = false;

	/* Initialize primary plane */
	vmw_du_plane_reset(primary);

	ret = drm_universal_plane_init(dev, &sou->base.primary,
				       0, &vmw_sou_plane_funcs,
				       vmw_primary_plane_formats,
				       ARRAY_SIZE(vmw_primary_plane_formats),
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize primary plane\n");
		goto err_free;
	}

	drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);

	/* Initialize cursor plane */
	vmw_du_plane_reset(cursor);

	ret = drm_universal_plane_init(dev, &sou->base.cursor,
				       0, &vmw_sou_cursor_funcs,
				       vmw_cursor_plane_formats,
				       ARRAY_SIZE(vmw_cursor_plane_formats),
				       DRM_PLANE_TYPE_CURSOR, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize cursor plane\n");
		drm_plane_cleanup(&sou->base.primary);
		goto err_free;
	}

	drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);

	vmw_du_connector_reset(connector);
	ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to initialize connector\n");
		goto err_free;
	}

	drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
	connector->status = vmw_du_connector_detect(connector, true);
	vmw_connector_state_to_vcs(connector->state)->is_implicit = false;

	ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
			       DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize encoder\n");
		goto err_free_connector;
	}

	(void) drm_mode_connector_attach_encoder(connector, encoder);
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	ret = drm_connector_register(connector);
	if (ret) {
		DRM_ERROR("Failed to register connector\n");
		goto err_free_encoder;
	}

	vmw_du_crtc_reset(crtc);
	ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
					&sou->base.cursor,
					&vmw_screen_object_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize CRTC\n");
		goto err_free_unregister;
	}

	drm_crtc_helper_add(crtc, &vmw_sou_crtc_helper_funcs);

	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_object_attach_property(&connector->base,
				   dev_priv->hotplug_mode_update_property, 1);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);
	if (dev_priv->implicit_placement_property)
		drm_object_attach_property
			(&connector->base,
			 dev_priv->implicit_placement_property,
			 sou->base.is_implicit);

	return 0;

err_free_unregister:
	drm_connector_unregister(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
err_free_connector:
	drm_connector_cleanup(connector);
err_free:
	kfree(sou);
	return ret;
}

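/**
 * vmw_kms_sou_init_display - Set up the screen object display units
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Checks that the device supports screen objects, initializes vblank
 * handling and the implicit placement property, and creates one display
 * unit per supported output.
 *
 * Returns 0 on success, negative error code on failure.
 */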
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
		DRM_INFO("Not using screen objects,"
			 " missing cap SCREEN_OBJECT_2\n");
		return -ENOSYS;
	}

	ret = -ENOMEM;
	dev_priv->num_implicit = 0;
	dev_priv->implicit_fb = NULL;

	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_create_implicit_placement_property(dev_priv, false);

	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
		vmw_sou_init(dev_priv, i);

	dev_priv->active_display_unit = vmw_du_screen_object;

	DRM_INFO("Screen Objects Display Unit initialized\n");

	return 0;
}

int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	drm_vblank_cleanup(dev);

	return 0;
}

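/**
 * do_dmabuf_define_gmrfb - Emit a DEFINE_GMRFB command for a framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 *
 * Points the device GMRFB at the backing buffer of @framebuffer so that
 * subsequent GMRFB blit commands operate on its contents.
 *
 * Returns 0 on success, negative error code on failure.
 */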
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	int depth = framebuffer->base.format->depth;
	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	/*
	 * Emulate RGBA support; contrary to svga_reg.h this is not
	 * supported by hosts. This is only a problem if we are reading
	 * this value later and expecting what we uploaded back.
	 */
	if (depth == 32)
		depth = 24;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (!cmd) {
		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
		return -ENOMEM;
	}

	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	/* Buffer is reserved in vram or GMR */
	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
 * blit surface to screen command.
 *
 * @dirty: The closure structure.
 *
 * Fills in the missing fields in the command, and translates the cliprects
 * to match the destination bounding box encoded.
 */
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
	int i;

	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
	cmd->header.size = sizeof(cmd->body) + region_size;

	/*
	 * Use the destination bounding box to specify destination - and
	 * source bounding regions.
	 */
	cmd->body.destRect.left = sdirty->left;
	cmd->body.destRect.right = sdirty->right;
	cmd->body.destRect.top = sdirty->top;
	cmd->body.destRect.bottom = sdirty->bottom;

	cmd->body.srcRect.left = sdirty->left + trans_x;
	cmd->body.srcRect.right = sdirty->right + trans_x;
	cmd->body.srcRect.top = sdirty->top + trans_y;
	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;

	cmd->body.srcImage.sid = sdirty->sid;
	cmd->body.destScreenId = dirty->unit->unit;

	/* Blits are relative to the destination rect. Translate. */
	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
		blit->left -= sdirty->left;
		blit->right -= sdirty->left;
		blit->top -= sdirty->top;
		blit->bottom -= sdirty->top;
	}

	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));

	sdirty->left = sdirty->top = S32_MAX;
	sdirty->right = sdirty->bottom = S32_MIN;
}

/**
 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
 * BLIT_SURFACE_TO_SCREEN command.
 */
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];

	/* Destination rect. */
	blit += dirty->num_hits;
	blit->left = dirty->unit_x1;
	blit->top = dirty->unit_y1;
	blit->right = dirty->unit_x2;
	blit->bottom = dirty->unit_y2;

	/* Destination bounding box */
	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);

	dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @srf: Pointer to surface to blit from. If NULL, the surface attached
 * to @framebuffer will be used.
 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
 * @num_clips: Number of clip rects in @clips.
 * @inc: Increment to use when looping over @clips.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
				 struct vmw_framebuffer *framebuffer,
				 struct drm_clip_rect *clips,
				 struct drm_vmw_rect *vclips,
				 struct vmw_resource *srf,
				 s32 dest_x,
				 s32 dest_y,
				 unsigned num_clips, int inc,
				 struct vmw_fence_obj **out_fence)
{
	struct vmw_framebuffer_surface *vfbs =
		container_of(framebuffer, typeof(*vfbs), base);
	struct vmw_kms_sou_surface_dirty sdirty;
	int ret;

	if (!srf)
		srf = &vfbs->surface->res;

	ret = vmw_kms_helper_resource_prepare(srf, true);
	if (ret)
		return ret;

	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
	sdirty.base.clip = vmw_sou_surface_clip;
	sdirty.base.dev_priv = dev_priv;
	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
		sizeof(SVGASignedRect) * num_clips;

	sdirty.sid = srf->id;
	sdirty.left = sdirty.top = S32_MAX;
	sdirty.right = sdirty.bottom = S32_MIN;
	sdirty.dst_x = dest_x;
	sdirty.dst_y = dest_y;

	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   dest_x, dest_y, num_clips, inc,
				   &sdirty.base);
	vmw_kms_helper_resource_finish(srf, out_fence);

	return ret;
}

/**
 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of dmabuf dirty clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of dmabuf dirty clips.
 */
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_dmabuf_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_dmabuf_clip - Callback to encode a dmabuf dirty cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
	blit->body.destScreenId = dirty->unit->unit;
	blit->body.srcOrigin.x = dirty->fb_x;
	blit->body.srcOrigin.y = dirty->fb_y;
	blit->body.destRect.left = dirty->unit_x1;
	blit->body.destRect.top = dirty->unit_y1;
	blit->body.destRect.right = dirty->unit_x2;
	blit->body.destRect.bottom = dirty->unit_y2;

	dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 * @clips: Array of clip rects.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @num_clips: Number of clip rects in @clips.
 * @increment: Increment to use when looping over @clips.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				struct drm_clip_rect *clips,
				struct drm_vmw_rect *vclips,
				unsigned num_clips, int increment,
				bool interruptible,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
					    false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
	dirty.clip = vmw_sou_dmabuf_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   0, 0, num_clips, increment, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}

/**
 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_readback_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
 */
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
	blit->body.srcScreenId = dirty->unit->unit;
	blit->body.destOrigin.x = dirty->fb_x;
	blit->body.destOrigin.y = dirty->fb_y;
	blit->body.srcRect.left = dirty->unit_x1;
	blit->body.srcRect.top = dirty->unit_y1;
	blit->body.srcRect.right = dirty->unit_x2;
	blit->body.srcRect.bottom = dirty->unit_y2;

	dirty->num_hits++;
}

/**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
			 struct drm_file *file_priv,
			 struct vmw_framebuffer *vfb,
			 struct drm_vmw_fence_rep __user *user_fence_rep,
			 struct drm_vmw_rect *vclips,
			 uint32_t num_clips)
{
	struct vmw_dma_buffer *buf =
		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
	dirty.clip = vmw_sou_readback_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
				   0, 0, num_clips, 1, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
				     user_fence_rep);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}