vmwgfx_scrn.c

/**************************************************************************
 *
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#define vmw_crtc_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.connector)

/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference between source clip rects and framebuffer coordinates.
 * @dst_y: Difference between source clip rects and framebuffer coordinates.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
        struct vmw_kms_dirty base;
        s32 left, right, top, bottom;
        s32 dst_x, dst_y;
        u32 sid;
};

/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
        uint32 header;
        SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_dmabuf_blit {
        uint32 header;
        SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
        SVGA3dCmdHeader header;
        SVGA3dCmdBlitSurfaceToScreen body;
};

/**
 * Display unit using screen objects.
 */
struct vmw_screen_object_unit {
        struct vmw_display_unit base;

        unsigned long buffer_size; /**< Size of allocated buffer */
        struct vmw_dma_buffer *buffer; /**< Backing store buffer */

        bool defined;
};
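
/**
 * vmw_sou_destroy - Clean up and free a screen object display unit
 *
 * @sou: The screen object unit to destroy.
 */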
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
        vmw_du_cleanup(&sou->base);
        kfree(sou);
}

/*
 * Screen Object Display Unit CRTC functions
 */

static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
        vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

/**
 * Send the fifo command to create a screen.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
                struct vmw_screen_object_unit *sou,
                uint32_t x, uint32_t y,
                struct drm_display_mode *mode)
{
        size_t fifo_size;

        struct {
                struct {
                        uint32_t cmdType;
                } header;
                SVGAScreenObject obj;
        } *cmd;

        BUG_ON(!sou->buffer);

        fifo_size = sizeof(*cmd);
        cmd = vmw_fifo_reserve(dev_priv, fifo_size);
        /* The hardware has hung, nothing we can do about it here. */
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, fifo_size);
        cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
        cmd->obj.structSize = sizeof(SVGAScreenObject);
        cmd->obj.id = sou->base.unit;
        cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
                (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
        cmd->obj.size.width = mode->hdisplay;
        cmd->obj.size.height = mode->vdisplay;
        if (sou->base.is_implicit) {
                cmd->obj.root.x = x;
                cmd->obj.root.y = y;
        } else {
                cmd->obj.root.x = sou->base.gui_x;
                cmd->obj.root.y = sou->base.gui_y;
        }
        sou->base.set_gui_x = cmd->obj.root.x;
        sou->base.set_gui_y = cmd->obj.root.y;

        /* Ok to assume that buffer is pinned in vram */
        vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
        cmd->obj.backingStore.pitch = mode->hdisplay * 4;

        vmw_fifo_commit(dev_priv, fifo_size);

        sou->defined = true;

        return 0;
}

/**
 * Send the fifo command to destroy a screen.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
                struct vmw_screen_object_unit *sou)
{
        size_t fifo_size;
        int ret;

        struct {
                struct {
                        uint32_t cmdType;
                } header;
                SVGAFifoCmdDestroyScreen body;
        } *cmd;

        /* no need to do anything */
        if (unlikely(!sou->defined))
                return 0;

        fifo_size = sizeof(*cmd);
        cmd = vmw_fifo_reserve(dev_priv, fifo_size);
        /* the hardware has hung, nothing we can do about it here */
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, fifo_size);
        cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
        cmd->body.screenId = sou->base.unit;

        vmw_fifo_commit(dev_priv, fifo_size);

        /* Force sync */
        ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
        if (unlikely(ret != 0))
                DRM_ERROR("Failed to sync with HW");
        else
                sou->defined = false;

        return ret;
}

/**
 * vmw_sou_crtc_mode_set_nofb - Create new screen
 *
 * @crtc: CRTC associated with the new screen
 *
 * This function creates/destroys a screen. This function cannot fail, so if
 * somehow we run into a failure, just do the best we can to get out.
 */
static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
        struct vmw_private *dev_priv;
        struct vmw_screen_object_unit *sou;
        struct vmw_framebuffer *vfb;
        struct drm_framebuffer *fb;
        struct drm_plane_state *ps;
        struct vmw_plane_state *vps;
        int ret;

        sou = vmw_crtc_to_sou(crtc);
        dev_priv = vmw_priv(crtc->dev);
        ps = crtc->primary->state;
        fb = ps->fb;
        vps = vmw_plane_state_to_vps(ps);

        vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;

        if (sou->defined) {
                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                if (ret) {
                        DRM_ERROR("Failed to destroy Screen Object\n");
                        return;
                }
        }

        if (vfb) {
                sou->buffer = vps->dmabuf;
                sou->buffer_size = vps->dmabuf_size;

                ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
                                &crtc->mode);
                if (ret)
                        DRM_ERROR("Failed to define Screen Object %dx%d\n",
                                        crtc->x, crtc->y);

                vmw_kms_add_active(dev_priv, &sou->base, vfb);
        } else {
                sou->buffer = NULL;
                sou->buffer_size = 0;

                vmw_kms_del_active(dev_priv, &sou->base);
        }
}

/**
 * vmw_sou_crtc_helper_prepare - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * Prepares the CRTC for a mode set, but we don't need to do anything here.
 */
static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
{
}

/**
 * vmw_sou_crtc_atomic_enable - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * This is called after a mode set has been completed.
 */
static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
                struct drm_crtc_state *old_state)
{
}

/**
 * vmw_sou_crtc_atomic_disable - Turns off CRTC
 *
 * @crtc: CRTC to be turned off
 */
static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
                struct drm_crtc_state *old_state)
{
        struct vmw_private *dev_priv;
        struct vmw_screen_object_unit *sou;
        int ret;

        if (!crtc) {
                DRM_ERROR("CRTC is NULL\n");
                return;
        }

        sou = vmw_crtc_to_sou(crtc);
        dev_priv = vmw_priv(crtc->dev);

        if (sou->defined) {
                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                if (ret)
                        DRM_ERROR("Failed to destroy Screen Object\n");
        }
}
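
/**
 * vmw_sou_crtc_page_flip - Flip the CRTC to a new framebuffer
 *
 * @crtc: CRTC to flip.
 * @new_fb: Framebuffer to flip to.
 * @event: Optional page flip event, queued on the post-flip fence.
 * @flags: Page flip flags. Asynchronous flips are not supported and the
 * flag is masked off.
 * @ctx: Modeset acquire context.
 *
 * Performs the atomic page flip, then issues a full-screen dirty update of
 * the new framebuffer and, if @event is non-NULL, queues the event on the
 * resulting fence. On error, the old framebuffer is restored on the primary
 * plane state.
 *
 * Returns 0 on success, negative error code on failure.
 */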
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
                struct drm_framebuffer *new_fb,
                struct drm_pending_vblank_event *event,
                uint32_t flags,
                struct drm_modeset_acquire_ctx *ctx)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
        struct vmw_fence_obj *fence = NULL;
        struct drm_vmw_rect vclips;
        int ret;

        if (!vmw_kms_crtc_flippable(dev_priv, crtc))
                return -EINVAL;

        flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
        ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags, ctx);
        if (ret) {
                DRM_ERROR("Page flip error %d.\n", ret);
                return ret;
        }

        /* do a full screen dirty update */
        vclips.x = crtc->x;
        vclips.y = crtc->y;
        vclips.w = crtc->mode.hdisplay;
        vclips.h = crtc->mode.vdisplay;

        if (vfb->dmabuf)
                ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
                                NULL, &vclips, 1, 1,
                                true, &fence);
        else
                ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
                                NULL, &vclips, NULL,
                                0, 0, 1, 1, &fence);

        if (ret != 0)
                goto out_no_fence;
        if (!fence) {
                ret = -EINVAL;
                goto out_no_fence;
        }

        if (event) {
                struct drm_file *file_priv = event->base.file_priv;

                ret = vmw_event_fence_action_queue(file_priv, fence,
                                &event->base,
                                &event->event.vbl.tv_sec,
                                &event->event.vbl.tv_usec,
                                true);
        }

        /*
         * No need to hold on to this now. The only cleanup
         * we need to do if we fail is unref the fence.
         */
        vmw_fence_obj_unreference(&fence);

        if (vmw_crtc_to_du(crtc)->is_implicit)
                vmw_kms_update_implicit_fb(dev_priv, crtc);

        return ret;

out_no_fence:
        drm_atomic_set_fb_for_plane(crtc->primary->state, old_fb);
        return ret;
}
static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
        .gamma_set = vmw_du_crtc_gamma_set,
        .destroy = vmw_sou_crtc_destroy,
        .reset = vmw_du_crtc_reset,
        .atomic_duplicate_state = vmw_du_crtc_duplicate_state,
        .atomic_destroy_state = vmw_du_crtc_destroy_state,
        .set_config = vmw_kms_set_config,
        .page_flip = vmw_sou_crtc_page_flip,
};

/*
 * Screen Object Display Unit encoder functions
 */

static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
        vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
        .destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */

static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
        vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static const struct drm_connector_funcs vmw_sou_connector_funcs = {
        .dpms = vmw_du_connector_dpms,
        .detect = vmw_du_connector_detect,
        .fill_modes = vmw_du_connector_fill_modes,
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_sou_connector_destroy,
        .reset = vmw_du_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = vmw_du_connector_atomic_set_property,
        .atomic_get_property = vmw_du_connector_atomic_get_property,
};

static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
        .best_encoder = drm_atomic_helper_best_encoder,
};

/*
 * Screen Object Display Plane Functions
 */
/**
 * vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the display surface and drops the reference to the backing buffer.
 */
static void
vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
                struct drm_plane_state *old_state)
{
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

        vmw_dmabuf_unreference(&vps->dmabuf);
        vps->dmabuf_size = 0;

        vmw_du_plane_cleanup_fb(plane, old_state);
}
/**
 * vmw_sou_primary_plane_prepare_fb - allocate backing buffer
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * The SOU backing buffer is our equivalent of the display plane.
 *
 * Returns 0 on success
 */
static int
vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
                struct drm_plane_state *new_state)
{
        struct drm_framebuffer *new_fb = new_state->fb;
        struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
        struct vmw_private *dev_priv;
        size_t size;
        int ret;

        if (!new_fb) {
                vmw_dmabuf_unreference(&vps->dmabuf);
                vps->dmabuf_size = 0;

                return 0;
        }

        size = new_state->crtc_w * new_state->crtc_h * 4;

        if (vps->dmabuf) {
                if (vps->dmabuf_size == size)
                        return 0;

                vmw_dmabuf_unreference(&vps->dmabuf);
                vps->dmabuf_size = 0;
        }

        vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
        if (!vps->dmabuf)
                return -ENOMEM;

        dev_priv = vmw_priv(crtc->dev);
        vmw_svga_enable(dev_priv);

        /* After we have allocated the backing store, we might not be able
         * to resume the overlays; this is preferred to failing the
         * allocation.
         */
        vmw_overlay_pause_all(dev_priv);
        ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
                        &vmw_vram_ne_placement,
                        false, &vmw_dmabuf_bo_free);
        vmw_overlay_resume_all(dev_priv);

        if (ret != 0)
                vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
        else
                vps->dmabuf_size = size;

        return ret;
}
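
/**
 * vmw_sou_primary_plane_atomic_update - Update the primary plane
 *
 * @plane: display plane
 * @old_state: Previous plane state
 *
 * Propagates the new framebuffer to the CRTC's legacy framebuffer pointer.
 * The actual screen (re)definition is done in vmw_sou_crtc_mode_set_nofb().
 */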
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
                struct drm_plane_state *old_state)
{
        struct drm_crtc *crtc = plane->state->crtc;

        if (crtc)
                crtc->primary->fb = plane->state->fb;
}

static const struct drm_plane_funcs vmw_sou_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = vmw_du_primary_plane_destroy,
        .reset = vmw_du_plane_reset,
        .atomic_duplicate_state = vmw_du_plane_duplicate_state,
        .atomic_destroy_state = vmw_du_plane_destroy_state,
};

static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = vmw_du_cursor_plane_destroy,
        .reset = vmw_du_plane_reset,
        .atomic_duplicate_state = vmw_du_plane_duplicate_state,
        .atomic_destroy_state = vmw_du_plane_destroy_state,
};

/*
 * Atomic Helpers
 */
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
        .atomic_check = vmw_du_cursor_plane_atomic_check,
        .atomic_update = vmw_du_cursor_plane_atomic_update,
        .prepare_fb = vmw_du_cursor_plane_prepare_fb,
        .cleanup_fb = vmw_du_plane_cleanup_fb,
};

static const struct
drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
        .atomic_check = vmw_du_primary_plane_atomic_check,
        .atomic_update = vmw_sou_primary_plane_atomic_update,
        .prepare_fb = vmw_sou_primary_plane_prepare_fb,
        .cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
};

static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
        .prepare = vmw_sou_crtc_helper_prepare,
        .mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
        .atomic_check = vmw_du_crtc_atomic_check,
        .atomic_begin = vmw_du_crtc_atomic_begin,
        .atomic_flush = vmw_du_crtc_atomic_flush,
        .atomic_enable = vmw_sou_crtc_atomic_enable,
        .atomic_disable = vmw_sou_crtc_atomic_disable,
};
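
/**
 * vmw_sou_init - Initialize a single screen object display unit
 *
 * @dev_priv: Pointer to the device private structure.
 * @unit: Unit (screen object) number.
 *
 * Allocates a struct vmw_screen_object_unit and sets up its primary and
 * cursor planes, connector, encoder and CRTC, attaching the relevant
 * display properties to the connector.
 *
 * Returns 0 on success, negative error code on failure.
 */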
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
        struct vmw_screen_object_unit *sou;
        struct drm_device *dev = dev_priv->dev;
        struct drm_connector *connector;
        struct drm_encoder *encoder;
        struct drm_plane *primary, *cursor;
        struct drm_crtc *crtc;
        int ret;

        sou = kzalloc(sizeof(*sou), GFP_KERNEL);
        if (!sou)
                return -ENOMEM;

        sou->base.unit = unit;
        crtc = &sou->base.crtc;
        encoder = &sou->base.encoder;
        connector = &sou->base.connector;
        primary = &sou->base.primary;
        cursor = &sou->base.cursor;

        sou->base.active_implicit = false;
        sou->base.pref_active = (unit == 0);
        sou->base.pref_width = dev_priv->initial_width;
        sou->base.pref_height = dev_priv->initial_height;
        sou->base.pref_mode = NULL;

        /*
         * Remove this after enabling atomic because property values can
         * only exist in a state object
         */
        sou->base.is_implicit = false;

        /* Initialize primary plane */
        vmw_du_plane_reset(primary);

        ret = drm_universal_plane_init(dev, &sou->base.primary,
                        0, &vmw_sou_plane_funcs,
                        vmw_primary_plane_formats,
                        ARRAY_SIZE(vmw_primary_plane_formats),
                        NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
        if (ret) {
                DRM_ERROR("Failed to initialize primary plane");
                goto err_free;
        }

        drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);

        /* Initialize cursor plane */
        vmw_du_plane_reset(cursor);

        ret = drm_universal_plane_init(dev, &sou->base.cursor,
                        0, &vmw_sou_cursor_funcs,
                        vmw_cursor_plane_formats,
                        ARRAY_SIZE(vmw_cursor_plane_formats),
                        NULL, DRM_PLANE_TYPE_CURSOR, NULL);
        if (ret) {
                DRM_ERROR("Failed to initialize cursor plane");
                drm_plane_cleanup(&sou->base.primary);
                goto err_free;
        }

        drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);

        vmw_du_connector_reset(connector);
        ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
                        DRM_MODE_CONNECTOR_VIRTUAL);
        if (ret) {
                DRM_ERROR("Failed to initialize connector\n");
                goto err_free;
        }

        drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
        connector->status = vmw_du_connector_detect(connector, true);
        vmw_connector_state_to_vcs(connector->state)->is_implicit = false;

        ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
                        DRM_MODE_ENCODER_VIRTUAL, NULL);
        if (ret) {
                DRM_ERROR("Failed to initialize encoder\n");
                goto err_free_connector;
        }

        (void) drm_mode_connector_attach_encoder(connector, encoder);
        encoder->possible_crtcs = (1 << unit);
        encoder->possible_clones = 0;

        ret = drm_connector_register(connector);
        if (ret) {
                DRM_ERROR("Failed to register connector\n");
                goto err_free_encoder;
        }

        vmw_du_crtc_reset(crtc);
        ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
                        &sou->base.cursor,
                        &vmw_screen_object_crtc_funcs, NULL);
        if (ret) {
                DRM_ERROR("Failed to initialize CRTC\n");
                goto err_free_unregister;
        }

        drm_crtc_helper_add(crtc, &vmw_sou_crtc_helper_funcs);

        drm_mode_crtc_set_gamma_size(crtc, 256);

        drm_object_attach_property(&connector->base,
                        dev_priv->hotplug_mode_update_property, 1);
        drm_object_attach_property(&connector->base,
                        dev->mode_config.suggested_x_property, 0);
        drm_object_attach_property(&connector->base,
                        dev->mode_config.suggested_y_property, 0);
        if (dev_priv->implicit_placement_property)
                drm_object_attach_property
                        (&connector->base,
                         dev_priv->implicit_placement_property,
                         sou->base.is_implicit);

        return 0;

err_free_unregister:
        drm_connector_unregister(connector);
err_free_encoder:
        drm_encoder_cleanup(encoder);
err_free_connector:
        drm_connector_cleanup(connector);
err_free:
        kfree(sou);
        return ret;
}
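
/**
 * vmw_kms_sou_init_display - Set up the screen object display units
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Checks for the SVGA_CAP_SCREEN_OBJECT_2 capability, initializes vblank
 * support and creates one display unit per possible output.
 *
 * Returns 0 on success, -ENOSYS if screen objects are not supported, or
 * another negative error code on failure.
 */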
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int i, ret;

        if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
                DRM_INFO("Not using screen objects,"
                         " missing cap SCREEN_OBJECT_2\n");
                return -ENOSYS;
        }

        ret = -ENOMEM;
        dev_priv->num_implicit = 0;
        dev_priv->implicit_fb = NULL;

        ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
        if (unlikely(ret != 0))
                return ret;

        vmw_kms_create_implicit_placement_property(dev_priv, false);

        for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
                vmw_sou_init(dev_priv, i);

        dev_priv->active_display_unit = vmw_du_screen_object;

        DRM_INFO("Screen Objects Display Unit initialized\n");

        return 0;
}
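
/**
 * do_dmabuf_define_gmrfb - Emit a DEFINE_GMRFB command for a framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 *
 * Points the device GMRFB at the framebuffer's backing buffer so that
 * subsequent GMRFB blit commands operate on this framebuffer.
 *
 * Returns 0 on success, -ENOMEM if fifo space could not be reserved.
 */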
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
                struct vmw_framebuffer *framebuffer)
{
        struct vmw_dma_buffer *buf =
                container_of(framebuffer, struct vmw_framebuffer_dmabuf,
                             base)->buffer;
        int depth = framebuffer->base.format->depth;
        struct {
                uint32_t header;
                SVGAFifoCmdDefineGMRFB body;
        } *cmd;

        /* Emulate RGBA support: contrary to svga_reg.h, this is not
         * supported by hosts. This is only a problem if we are reading
         * this value later and expecting what we uploaded back.
         */
        if (depth == 32)
                depth = 24;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (!cmd) {
                DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
                return -ENOMEM;
        }

        cmd->header = SVGA_CMD_DEFINE_GMRFB;
        cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
        cmd->body.format.colorDepth = depth;
        cmd->body.format.reserved = 0;
        cmd->body.bytesPerLine = framebuffer->base.pitches[0];
        /* Buffer is reserved in vram or GMR */
        vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}
/**
 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
 * blit surface to screen command.
 *
 * @dirty: The closure structure.
 *
 * Fills in the missing fields in the command, and translates the cliprects
 * to match the encoded destination bounding box.
 */
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_surface_dirty *sdirty =
                container_of(dirty, typeof(*sdirty), base);
        struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
        s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
        s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
        size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
        SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
        int i;

        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
        cmd->header.size = sizeof(cmd->body) + region_size;

        /*
         * Use the destination bounding box to specify the destination and
         * source bounding regions.
         */
        cmd->body.destRect.left = sdirty->left;
        cmd->body.destRect.right = sdirty->right;
        cmd->body.destRect.top = sdirty->top;
        cmd->body.destRect.bottom = sdirty->bottom;

        cmd->body.srcRect.left = sdirty->left + trans_x;
        cmd->body.srcRect.right = sdirty->right + trans_x;
        cmd->body.srcRect.top = sdirty->top + trans_y;
        cmd->body.srcRect.bottom = sdirty->bottom + trans_y;

        cmd->body.srcImage.sid = sdirty->sid;
        cmd->body.destScreenId = dirty->unit->unit;

        /* Blits are relative to the destination rect. Translate. */
        for (i = 0; i < dirty->num_hits; ++i, ++blit) {
                blit->left -= sdirty->left;
                blit->right -= sdirty->left;
                blit->top -= sdirty->top;
                blit->bottom -= sdirty->top;
        }

        vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));

        sdirty->left = sdirty->top = S32_MAX;
        sdirty->right = sdirty->bottom = S32_MIN;
}
/**
 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
 * BLIT_SURFACE_TO_SCREEN command.
 */
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_surface_dirty *sdirty =
                container_of(dirty, typeof(*sdirty), base);
        struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
        SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];

        /* Destination rect. */
        blit += dirty->num_hits;
        blit->left = dirty->unit_x1;
        blit->top = dirty->unit_y1;
        blit->right = dirty->unit_x2;
        blit->bottom = dirty->unit_y2;

        /* Destination bounding box */
        sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
        sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
        sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
        sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);

        dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @srf: Pointer to surface to blit from. If NULL, the surface attached
 * to @framebuffer will be used.
 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
 * @num_clips: Number of clip rects in @clips.
 * @inc: Increment to use when looping over @clips.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
                struct vmw_framebuffer *framebuffer,
                struct drm_clip_rect *clips,
                struct drm_vmw_rect *vclips,
                struct vmw_resource *srf,
                s32 dest_x,
                s32 dest_y,
                unsigned num_clips, int inc,
                struct vmw_fence_obj **out_fence)
{
        struct vmw_framebuffer_surface *vfbs =
                container_of(framebuffer, typeof(*vfbs), base);
        struct vmw_kms_sou_surface_dirty sdirty;
        int ret;

        if (!srf)
                srf = &vfbs->surface->res;

        ret = vmw_kms_helper_resource_prepare(srf, true);
        if (ret)
                return ret;

        sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
        sdirty.base.clip = vmw_sou_surface_clip;
        sdirty.base.dev_priv = dev_priv;
        sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
                sizeof(SVGASignedRect) * num_clips;

        sdirty.sid = srf->id;
        sdirty.left = sdirty.top = S32_MAX;
        sdirty.right = sdirty.bottom = S32_MIN;
        sdirty.dst_x = dest_x;
        sdirty.dst_y = dest_y;

        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                        dest_x, dest_y, num_clips, inc,
                        &sdirty.base);
        vmw_kms_helper_resource_finish(srf, out_fence);

        return ret;
}
/**
 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of GMRFB-to-screen
 * blit clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of GMRFB-to-screen blits.
 */
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        vmw_fifo_commit(dirty->dev_priv,
                        sizeof(struct vmw_kms_sou_dmabuf_blit) *
                        dirty->num_hits);
}

/**
 * vmw_sou_dmabuf_clip - Callback to encode a GMRFB-to-screen blit cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;

        blit += dirty->num_hits;
        blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
        blit->body.destScreenId = dirty->unit->unit;
        blit->body.srcOrigin.x = dirty->fb_x;
        blit->body.srcOrigin.y = dirty->fb_y;
        blit->body.destRect.left = dirty->unit_x1;
        blit->body.destRect.top = dirty->unit_y1;
        blit->body.destRect.right = dirty->unit_x2;
        blit->body.destRect.bottom = dirty->unit_y2;
        dirty->num_hits++;
}
/**
 * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 * @clips: Array of clip rects.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @num_clips: Number of clip rects in @clips.
 * @increment: Increment to use when looping over @clips.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
                struct vmw_framebuffer *framebuffer,
                struct drm_clip_rect *clips,
                struct drm_vmw_rect *vclips,
                unsigned num_clips, int increment,
                bool interruptible,
                struct vmw_fence_obj **out_fence)
{
        struct vmw_dma_buffer *buf =
                container_of(framebuffer, struct vmw_framebuffer_dmabuf,
                             base)->buffer;
        struct vmw_kms_dirty dirty;
        int ret;

        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
                        false);
        if (ret)
                return ret;

        ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
        if (unlikely(ret != 0))
                goto out_revert;

        dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
        dirty.clip = vmw_sou_dmabuf_clip;
        dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                        0, 0, num_clips, increment, &dirty);
        vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);

        return ret;

out_revert:
        vmw_kms_helper_buffer_revert(buf);

        return ret;
}
/**
 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        vmw_fifo_commit(dirty->dev_priv,
                        sizeof(struct vmw_kms_sou_readback_blit) *
                        dirty->num_hits);
}

/**
 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
 */
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_readback_blit *blit = dirty->cmd;

        blit += dirty->num_hits;
        blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
        blit->body.srcScreenId = dirty->unit->unit;
        blit->body.destOrigin.x = dirty->fb_x;
        blit->body.destOrigin.y = dirty->fb_y;
        blit->body.srcRect.left = dirty->unit_x1;
        blit->body.srcRect.top = dirty->unit_y1;
        blit->body.srcRect.right = dirty->unit_x2;
        blit->body.srcRect.bottom = dirty->unit_y2;
        dirty->num_hits++;
}

/**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                struct drm_file *file_priv,
                struct vmw_framebuffer *vfb,
                struct drm_vmw_fence_rep __user *user_fence_rep,
                struct drm_vmw_rect *vclips,
                uint32_t num_clips)
{
        struct vmw_dma_buffer *buf =
                container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
        struct vmw_kms_dirty dirty;
        int ret;

        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
        if (ret)
                return ret;

        ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
        if (unlikely(ret != 0))
                goto out_revert;

        dirty.fifo_commit = vmw_sou_readback_fifo_commit;
        dirty.clip = vmw_sou_readback_clip;
        dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
                        0, 0, num_clips, 1, &dirty);
        vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
                        user_fence_rep);

        return ret;

out_revert:
        vmw_kms_helper_buffer_revert(buf);

        return ret;
}