vmwgfx_scrn.c

/**************************************************************************
 *
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#define vmw_crtc_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.connector)

/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference between source clip rects and framebuffer coordinates.
 * @dst_y: Difference between source clip rects and framebuffer coordinates.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
	struct vmw_kms_dirty base;
	s32 left, right, top, bottom;
	s32 dst_x, dst_y;
	u32 sid;
};

/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
	uint32 header;
	SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_dmabuf_blit {
	uint32 header;
	SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
	SVGA3dCmdHeader header;
	SVGA3dCmdBlitSurfaceToScreen body;
};

/**
 * Display unit using screen objects.
 */
struct vmw_screen_object_unit {
	struct vmw_display_unit base;

	unsigned long buffer_size; /**< Size of allocated buffer */
	struct vmw_dma_buffer *buffer; /**< Backing store buffer */

	bool defined;
};
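
/**
 * vmw_sou_destroy - Clean up the display unit state and free the
 * containing screen object unit.
 *
 * @sou: The screen object unit to destroy.
 */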
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
	vmw_du_cleanup(&sou->base);
	kfree(sou);
}

/*
 * Screen Object Display Unit CRTC functions
 */

static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

/**
 * Send the fifo command to create a screen.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
			       struct vmw_screen_object_unit *sou,
			       uint32_t x, uint32_t y,
			       struct drm_display_mode *mode)
{
	size_t fifo_size;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAScreenObject obj;
	} *cmd;

	BUG_ON(!sou->buffer);

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* The hardware has hung, nothing we can do about it here. */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
	cmd->obj.structSize = sizeof(SVGAScreenObject);
	cmd->obj.id = sou->base.unit;
	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
	cmd->obj.size.width = mode->hdisplay;
	cmd->obj.size.height = mode->vdisplay;
	if (sou->base.is_implicit) {
		cmd->obj.root.x = x;
		cmd->obj.root.y = y;
	} else {
		cmd->obj.root.x = sou->base.gui_x;
		cmd->obj.root.y = sou->base.gui_y;
	}
	sou->base.set_gui_x = cmd->obj.root.x;
	sou->base.set_gui_y = cmd->obj.root.y;

	/* Ok to assume that buffer is pinned in vram */
	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
	cmd->obj.backingStore.pitch = mode->hdisplay * 4;

	vmw_fifo_commit(dev_priv, fifo_size);

	sou->defined = true;

	return 0;
}

/**
 * Send the fifo command to destroy a screen.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
				struct vmw_screen_object_unit *sou)
{
	size_t fifo_size;
	int ret;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAFifoCmdDestroyScreen body;
	} *cmd;

	/* no need to do anything */
	if (unlikely(!sou->defined))
		return 0;

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* the hardware has hung, nothing we can do about it here */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
	cmd->body.screenId = sou->base.unit;

	vmw_fifo_commit(dev_priv, fifo_size);

	/* Force sync */
	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
	if (unlikely(ret != 0))
		DRM_ERROR("Failed to sync with HW");
	else
		sou->defined = false;

	return ret;
}

/**
 * vmw_sou_crtc_mode_set_nofb - Create new screen
 *
 * @crtc: CRTC associated with the new screen
 *
 * This function creates/destroys a screen. This function cannot fail, so if
 * somehow we run into a failure, just do the best we can to get out.
 */
static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_plane_state *ps;
	struct vmw_plane_state *vps;
	int ret;

	sou = vmw_crtc_to_sou(crtc);
	dev_priv = vmw_priv(crtc->dev);
	ps = crtc->primary->state;
	fb = ps->fb;
	vps = vmw_plane_state_to_vps(ps);

	vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;

	if (sou->defined) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		if (ret) {
			DRM_ERROR("Failed to destroy Screen Object\n");
			return;
		}
	}

	if (vfb) {
		sou->buffer = vps->dmabuf;
		sou->buffer_size = vps->dmabuf_size;

		ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
					  &crtc->mode);
		if (ret)
			DRM_ERROR("Failed to define Screen Object %dx%d\n",
				  crtc->x, crtc->y);

		vmw_kms_add_active(dev_priv, &sou->base, vfb);
	} else {
		sou->buffer = NULL;
		sou->buffer_size = 0;

		vmw_kms_del_active(dev_priv, &sou->base);
	}
}

/**
 * vmw_sou_crtc_helper_prepare - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * Prepares the CRTC for a mode set, but we don't need to do anything here.
 */
static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
{
}

/**
 * vmw_sou_crtc_helper_commit - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * This is called after a mode set has been completed.
 */
static void vmw_sou_crtc_helper_commit(struct drm_crtc *crtc)
{
}

/**
 * vmw_sou_crtc_helper_disable - Turns off CRTC
 *
 * @crtc: CRTC to be turned off
 */
static void vmw_sou_crtc_helper_disable(struct drm_crtc *crtc)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	int ret;

	if (!crtc) {
		DRM_ERROR("CRTC is NULL\n");
		return;
	}

	sou = vmw_crtc_to_sou(crtc);
	dev_priv = vmw_priv(crtc->dev);

	if (sou->defined) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		if (ret)
			DRM_ERROR("Failed to destroy Screen Object\n");
	}
}
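
/**
 * vmw_sou_crtc_page_flip - Flip the CRTC to a new framebuffer
 *
 * @crtc: CRTC to flip.
 * @new_fb: Framebuffer to flip to.
 * @event: Optional page flip completion event, queued on the flip fence.
 * @flags: Page flip flags. Asynchronous flips are not supported, so
 * DRM_MODE_PAGE_FLIP_ASYNC is masked off.
 *
 * Performs the flip through the atomic helper, then issues a full-screen
 * dirty update and, if requested, queues @event on the resulting fence.
 *
 * Returns 0 on success, negative error code on failure.
 */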
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
				  struct drm_framebuffer *new_fb,
				  struct drm_pending_vblank_event *event,
				  uint32_t flags)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
	struct vmw_fence_obj *fence = NULL;
	struct drm_vmw_rect vclips;
	int ret;

	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
		return -EINVAL;

	flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
	ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags);
	if (ret) {
		DRM_ERROR("Page flip error %d.\n", ret);
		return ret;
	}

	/* do a full screen dirty update */
	vclips.x = crtc->x;
	vclips.y = crtc->y;
	vclips.w = crtc->mode.hdisplay;
	vclips.h = crtc->mode.vdisplay;

	if (vfb->dmabuf)
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
						  NULL, &vclips, 1, 1,
						  true, &fence);
	else
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
						   NULL, &vclips, NULL,
						   0, 0, 1, 1, &fence);

	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	if (event) {
		struct drm_file *file_priv = event->base.file_priv;

		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   true);
	}

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_kms_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	drm_atomic_set_fb_for_plane(crtc->primary->state, old_fb);
	return ret;
}

static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_sou_crtc_destroy,
	.reset = vmw_du_crtc_reset,
	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
	.atomic_destroy_state = vmw_du_crtc_destroy_state,
	.set_config = vmw_kms_set_config,
	.page_flip = vmw_sou_crtc_page_flip,
};

/*
 * Screen Object Display Unit encoder functions
 */

static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
	.destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */

static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
	vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static const struct drm_connector_funcs vmw_sou_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_sou_connector_destroy,
	.reset = vmw_du_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = vmw_du_connector_atomic_set_property,
	.atomic_get_property = vmw_du_connector_atomic_get_property,
};

static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
	.best_encoder = drm_atomic_helper_best_encoder,
};
/*
 * Screen Object Display Plane Functions
 */

/**
 * vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the display surface and drops the reference to the backing buffer.
 */
static void
vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
				 struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_dmabuf_unreference(&vps->dmabuf);
	vps->dmabuf_size = 0;

	vmw_du_plane_cleanup_fb(plane, old_state);
}
/**
 * vmw_sou_primary_plane_prepare_fb - allocate backing buffer
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * The SOU backing buffer is our equivalent of the display plane.
 *
 * Returns 0 on success
 */
static int
vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_private *dev_priv;
	size_t size;
	int ret;

	if (!new_fb) {
		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;

		return 0;
	}

	size = new_state->crtc_w * new_state->crtc_h * 4;

	if (vps->dmabuf) {
		if (vps->dmabuf_size == size)
			return 0;

		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;
	}

	vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
	if (!vps->dmabuf)
		return -ENOMEM;

	dev_priv = vmw_priv(crtc->dev);
	vmw_svga_enable(dev_priv);

	/*
	 * After we have allocated the backing store we might not be able to
	 * resume the overlays; this is preferred over failing the allocation.
	 */
	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);

	if (ret != 0)
		vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
	else
		vps->dmabuf_size = size;

	return ret;
}
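
/**
 * vmw_sou_primary_plane_atomic_update - Propagate the new framebuffer to
 * the legacy CRTC fb pointer.
 *
 * @plane: display plane
 * @old_state: The previous plane state
 */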
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc;

	if (crtc)
		crtc->primary->fb = plane->state->fb;
}

static const struct drm_plane_funcs vmw_sou_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vmw_du_primary_plane_destroy,
	.reset = vmw_du_plane_reset,
	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
	.atomic_destroy_state = vmw_du_plane_destroy_state,
};

static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vmw_du_cursor_plane_destroy,
	.reset = vmw_du_plane_reset,
	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
	.atomic_destroy_state = vmw_du_plane_destroy_state,
};

/*
 * Atomic Helpers
 */
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
	.atomic_check = vmw_du_cursor_plane_atomic_check,
	.atomic_update = vmw_du_cursor_plane_atomic_update,
	.prepare_fb = vmw_du_cursor_plane_prepare_fb,
	.cleanup_fb = vmw_du_plane_cleanup_fb,
};

static const struct
drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
	.atomic_check = vmw_du_primary_plane_atomic_check,
	.atomic_update = vmw_sou_primary_plane_atomic_update,
	.prepare_fb = vmw_sou_primary_plane_prepare_fb,
	.cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
};

static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
	.prepare = vmw_sou_crtc_helper_prepare,
	.commit = vmw_sou_crtc_helper_commit,
	.disable = vmw_sou_crtc_helper_disable,
	.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
	.atomic_check = vmw_du_crtc_atomic_check,
	.atomic_begin = vmw_du_crtc_atomic_begin,
	.atomic_flush = vmw_du_crtc_atomic_flush,
};
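
/**
 * vmw_sou_init - Create one screen object display unit
 *
 * @dev_priv: Pointer to the device private structure.
 * @unit: Index of the display unit to create.
 *
 * Allocates a struct vmw_screen_object_unit and initializes its primary
 * plane, cursor plane, connector, encoder and CRTC.
 *
 * Returns 0 on success, negative error code on failure.
 */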
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_screen_object_unit *sou;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_plane *primary, *cursor;
	struct drm_crtc *crtc;
	int ret;

	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
	if (!sou)
		return -ENOMEM;

	sou->base.unit = unit;
	crtc = &sou->base.crtc;
	encoder = &sou->base.encoder;
	connector = &sou->base.connector;
	primary = &sou->base.primary;
	cursor = &sou->base.cursor;

	sou->base.active_implicit = false;
	sou->base.pref_active = (unit == 0);
	sou->base.pref_width = dev_priv->initial_width;
	sou->base.pref_height = dev_priv->initial_height;
	sou->base.pref_mode = NULL;

	/*
	 * Remove this after enabling atomic because property values can
	 * only exist in a state object
	 */
	sou->base.is_implicit = false;

	/* Initialize primary plane */
	vmw_du_plane_reset(primary);

	ret = drm_universal_plane_init(dev, &sou->base.primary,
				       0, &vmw_sou_plane_funcs,
				       vmw_primary_plane_formats,
				       ARRAY_SIZE(vmw_primary_plane_formats),
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize primary plane");
		goto err_free;
	}

	drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);

	/* Initialize cursor plane */
	vmw_du_plane_reset(cursor);

	ret = drm_universal_plane_init(dev, &sou->base.cursor,
				       0, &vmw_sou_cursor_funcs,
				       vmw_cursor_plane_formats,
				       ARRAY_SIZE(vmw_cursor_plane_formats),
				       DRM_PLANE_TYPE_CURSOR, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize cursor plane");
		drm_plane_cleanup(&sou->base.primary);
		goto err_free;
	}

	drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);

	vmw_du_connector_reset(connector);
	ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to initialize connector\n");
		goto err_free;
	}

	drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
	connector->status = vmw_du_connector_detect(connector, true);
	vmw_connector_state_to_vcs(connector->state)->is_implicit = false;

	ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
			       DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize encoder\n");
		goto err_free_connector;
	}

	(void) drm_mode_connector_attach_encoder(connector, encoder);
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	ret = drm_connector_register(connector);
	if (ret) {
		DRM_ERROR("Failed to register connector\n");
		goto err_free_encoder;
	}

	vmw_du_crtc_reset(crtc);
	ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
					&sou->base.cursor,
					&vmw_screen_object_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize CRTC\n");
		goto err_free_unregister;
	}

	drm_crtc_helper_add(crtc, &vmw_sou_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_object_attach_property(&connector->base,
				   dev_priv->hotplug_mode_update_property, 1);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);
	if (dev_priv->implicit_placement_property)
		drm_object_attach_property
			(&connector->base,
			 dev_priv->implicit_placement_property,
			 sou->base.is_implicit);

	return 0;

err_free_unregister:
	drm_connector_unregister(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
err_free_connector:
	drm_connector_cleanup(connector);
err_free:
	kfree(sou);
	return ret;
}
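
/**
 * vmw_kms_sou_init_display - Initialize screen object based KMS
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Checks for the SCREEN_OBJECT_2 capability, sets up vblank handling and
 * creates one display unit per supported output.
 *
 * Returns 0 on success, negative error code on failure.
 */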
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
		DRM_INFO("Not using screen objects,"
			 " missing cap SCREEN_OBJECT_2\n");
		return -ENOSYS;
	}

	ret = -ENOMEM;
	dev_priv->num_implicit = 0;
	dev_priv->implicit_fb = NULL;

	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_create_implicit_placement_property(dev_priv, false);

	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
		vmw_sou_init(dev_priv, i);

	dev_priv->active_display_unit = vmw_du_screen_object;

	DRM_INFO("Screen Objects Display Unit initialized\n");

	return 0;
}
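
/**
 * vmw_kms_sou_close_display - Tear down screen object based KMS
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Cleans up vblank handling for the device. Always returns 0.
 */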
int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	drm_vblank_cleanup(dev);

	return 0;
}
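
/**
 * do_dmabuf_define_gmrfb - Emit an SVGA_CMD_DEFINE_GMRFB command
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer whose backing
 * buffer is set up as the GMRFB.
 *
 * Returns 0 on success, -ENOMEM if the fifo reservation fails.
 */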
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	int depth = framebuffer->base.format->depth;
	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	/*
	 * Emulate RGBA support, contrary to svga_reg.h this is not
	 * supported by hosts. This is only a problem if we are reading
	 * this value later and expecting what we uploaded back.
	 */
	if (depth == 32)
		depth = 24;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (!cmd) {
		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
		return -ENOMEM;
	}

	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	/* Buffer is reserved in vram or GMR */
	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
/**
 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
 * blit surface to screen command.
 *
 * @dirty: The closure structure.
 *
 * Fills in the missing fields in the command, and translates the cliprects
 * to match the destination bounding box encoded.
 */
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
	int i;

	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
	cmd->header.size = sizeof(cmd->body) + region_size;

	/*
	 * Use the destination bounding box to specify destination - and
	 * source bounding regions.
	 */
	cmd->body.destRect.left = sdirty->left;
	cmd->body.destRect.right = sdirty->right;
	cmd->body.destRect.top = sdirty->top;
	cmd->body.destRect.bottom = sdirty->bottom;

	cmd->body.srcRect.left = sdirty->left + trans_x;
	cmd->body.srcRect.right = sdirty->right + trans_x;
	cmd->body.srcRect.top = sdirty->top + trans_y;
	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;

	cmd->body.srcImage.sid = sdirty->sid;
	cmd->body.destScreenId = dirty->unit->unit;

	/* Blits are relative to the destination rect. Translate. */
	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
		blit->left -= sdirty->left;
		blit->right -= sdirty->left;
		blit->top -= sdirty->top;
		blit->bottom -= sdirty->top;
	}

	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));

	sdirty->left = sdirty->top = S32_MAX;
	sdirty->right = sdirty->bottom = S32_MIN;
}

/**
 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
 * BLIT_SURFACE_TO_SCREEN command.
 */
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];

	/* Destination rect. */
	blit += dirty->num_hits;
	blit->left = dirty->unit_x1;
	blit->top = dirty->unit_y1;
	blit->right = dirty->unit_x2;
	blit->bottom = dirty->unit_y2;

	/* Destination bounding box */
	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);

	dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @srf: Pointer to surface to blit from. If NULL, the surface attached
 * to @framebuffer will be used.
 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
 * @num_clips: Number of clip rects in @clips.
 * @inc: Increment to use when looping over @clips.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
				 struct vmw_framebuffer *framebuffer,
				 struct drm_clip_rect *clips,
				 struct drm_vmw_rect *vclips,
				 struct vmw_resource *srf,
				 s32 dest_x,
				 s32 dest_y,
				 unsigned num_clips, int inc,
				 struct vmw_fence_obj **out_fence)
{
	struct vmw_framebuffer_surface *vfbs =
		container_of(framebuffer, typeof(*vfbs), base);
	struct vmw_kms_sou_surface_dirty sdirty;
	int ret;

	if (!srf)
		srf = &vfbs->surface->res;

	ret = vmw_kms_helper_resource_prepare(srf, true);
	if (ret)
		return ret;

	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
	sdirty.base.clip = vmw_sou_surface_clip;
	sdirty.base.dev_priv = dev_priv;
	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
		sizeof(SVGASignedRect) * num_clips;

	sdirty.sid = srf->id;
	sdirty.left = sdirty.top = S32_MAX;
	sdirty.right = sdirty.bottom = S32_MIN;
	sdirty.dst_x = dest_x;
	sdirty.dst_y = dest_y;

	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   dest_x, dest_y, num_clips, inc,
				   &sdirty.base);
	vmw_kms_helper_resource_finish(srf, out_fence);

	return ret;
}
/**
 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of
 * GMRFB-to-screen blit clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of GMRFB-to-screen blits.
 */
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_dmabuf_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_dmabuf_clip - Callback to encode a GMRFB-to-screen blit cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
	blit->body.destScreenId = dirty->unit->unit;
	blit->body.srcOrigin.x = dirty->fb_x;
	blit->body.srcOrigin.y = dirty->fb_y;
	blit->body.destRect.left = dirty->unit_x1;
	blit->body.destRect.top = dirty->unit_y1;
	blit->body.destRect.right = dirty->unit_x2;
	blit->body.destRect.bottom = dirty->unit_y2;
	dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 * @clips: Array of clip rects.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @num_clips: Number of clip rects in @clips.
 * @increment: Increment to use when looping over @clips.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				struct drm_clip_rect *clips,
				struct drm_vmw_rect *vclips,
				unsigned num_clips, int increment,
				bool interruptible,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
					    false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
	dirty.clip = vmw_sou_dmabuf_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   0, 0, num_clips, increment, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}
/**
 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_readback_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
 */
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
	blit->body.srcScreenId = dirty->unit->unit;
	blit->body.destOrigin.x = dirty->fb_x;
	blit->body.destOrigin.y = dirty->fb_y;
	blit->body.srcRect.left = dirty->unit_x1;
	blit->body.srcRect.top = dirty->unit_y1;
	blit->body.srcRect.right = dirty->unit_x2;
	blit->body.srcRect.bottom = dirty->unit_y2;
	dirty->num_hits++;
}

/**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
			 struct drm_file *file_priv,
			 struct vmw_framebuffer *vfb,
			 struct drm_vmw_fence_rep __user *user_fence_rep,
			 struct drm_vmw_rect *vclips,
			 uint32_t num_clips)
{
	struct vmw_dma_buffer *buf =
		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
	dirty.clip = vmw_sou_readback_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
				   0, 0, num_clips, 1, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
				     user_fence_rep);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}