vmwgfx_scrn.c

/**************************************************************************
 *
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>

#define vmw_crtc_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.connector)

/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference between source clip rects and framebuffer coordinates.
 * @dst_y: Difference between source clip rects and framebuffer coordinates.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
        struct vmw_kms_dirty base;
        s32 left, right, top, bottom;
        s32 dst_x, dst_y;
        u32 sid;
};

/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
        uint32 header;
        SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_dmabuf_blit {
        uint32 header;
        SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
        SVGA3dCmdHeader header;
        SVGA3dCmdBlitSurfaceToScreen body;
};

/**
 * Display unit using screen objects.
 */
struct vmw_screen_object_unit {
        struct vmw_display_unit base;

        unsigned long buffer_size; /**< Size of allocated buffer */
        struct vmw_dma_buffer *buffer; /**< Backing store buffer */

        bool defined;
};

static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
        vmw_du_cleanup(&sou->base);
        kfree(sou);
}

/*
 * Screen Object Display Unit CRTC functions
 */

static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
        vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

/**
 * Send the fifo command to create a screen.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
                               struct vmw_screen_object_unit *sou,
                               uint32_t x, uint32_t y,
                               struct drm_display_mode *mode)
{
        size_t fifo_size;

        struct {
                struct {
                        uint32_t cmdType;
                } header;
                SVGAScreenObject obj;
        } *cmd;

        BUG_ON(!sou->buffer);

        fifo_size = sizeof(*cmd);
        cmd = vmw_fifo_reserve(dev_priv, fifo_size);
        /* The hardware has hung, nothing we can do about it here. */
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, fifo_size);
        cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
        cmd->obj.structSize = sizeof(SVGAScreenObject);
        cmd->obj.id = sou->base.unit;
        cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
                (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
        cmd->obj.size.width = mode->hdisplay;
        cmd->obj.size.height = mode->vdisplay;
        if (sou->base.is_implicit) {
                cmd->obj.root.x = x;
                cmd->obj.root.y = y;
        } else {
                cmd->obj.root.x = sou->base.gui_x;
                cmd->obj.root.y = sou->base.gui_y;
        }
        sou->base.set_gui_x = cmd->obj.root.x;
        sou->base.set_gui_y = cmd->obj.root.y;

        /* Ok to assume that buffer is pinned in vram */
        vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
        cmd->obj.backingStore.pitch = mode->hdisplay * 4;

        vmw_fifo_commit(dev_priv, fifo_size);

        sou->defined = true;

        return 0;
}

/**
 * Send the fifo command to destroy a screen.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
                                struct vmw_screen_object_unit *sou)
{
        size_t fifo_size;
        int ret;

        struct {
                struct {
                        uint32_t cmdType;
                } header;
                SVGAFifoCmdDestroyScreen body;
        } *cmd;

        /* no need to do anything */
        if (unlikely(!sou->defined))
                return 0;

        fifo_size = sizeof(*cmd);
        cmd = vmw_fifo_reserve(dev_priv, fifo_size);
        /* the hardware has hung, nothing we can do about it here */
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, fifo_size);
        cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
        cmd->body.screenId = sou->base.unit;

        vmw_fifo_commit(dev_priv, fifo_size);

        /* Force sync */
        ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
        if (unlikely(ret != 0))
                DRM_ERROR("Failed to sync with HW");
        else
                sou->defined = false;

        return ret;
}

/**
 * Free the backing store.
 */
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
                                 struct vmw_screen_object_unit *sou)
{
        vmw_dmabuf_unreference(&sou->buffer);
        sou->buffer_size = 0;
}

/**
 * Allocate the backing store for the buffer.
 */
static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
                                 struct vmw_screen_object_unit *sou,
                                 unsigned long size)
{
        int ret;

        if (sou->buffer_size == size)
                return 0;

        if (sou->buffer)
                vmw_sou_backing_free(dev_priv, sou);

        sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
        if (unlikely(sou->buffer == NULL))
                return -ENOMEM;

        /* After allocating the backing store we might not be able to
         * resume the overlays; this is preferred to failing the allocation.
         */
        vmw_overlay_pause_all(dev_priv);
        ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
                              &vmw_vram_ne_placement,
                              false, &vmw_dmabuf_bo_free);
        vmw_overlay_resume_all(dev_priv);
        if (unlikely(ret != 0))
                sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
        else
                sou->buffer_size = size;

        return ret;
}
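
/**
 * vmw_sou_crtc_set_config - The screen object set_config callback.
 *
 * @set: The drm_mode_set describing the requested crtc, mode, framebuffer
 * and connector configuration.
 *
 * Validates the request, turns the crtc off when no mode or framebuffer
 * is given, and otherwise (re)allocates the backing store and defines the
 * screen object at the requested position.
 *
 * Returns 0 on success, negative error code on failure.
 */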
static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
{
        struct vmw_private *dev_priv;
        struct vmw_screen_object_unit *sou;
        struct drm_connector *connector;
        struct drm_display_mode *mode;
        struct drm_encoder *encoder;
        struct vmw_framebuffer *vfb;
        struct drm_framebuffer *fb;
        struct drm_crtc *crtc;
        int ret = 0;

        if (!set)
                return -EINVAL;

        if (!set->crtc)
                return -EINVAL;

        /* get the sou */
        crtc = set->crtc;
        sou = vmw_crtc_to_sou(crtc);
        vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
        dev_priv = vmw_priv(crtc->dev);

        if (set->num_connectors > 1) {
                DRM_ERROR("Too many connectors\n");
                return -EINVAL;
        }

        if (set->num_connectors == 1 &&
            set->connectors[0] != &sou->base.connector) {
                DRM_ERROR("Connector doesn't match %p %p\n",
                          set->connectors[0], &sou->base.connector);
                return -EINVAL;
        }

        /* Only one active implicit frame-buffer at a time. */
        if (sou->base.is_implicit &&
            dev_priv->implicit_fb && vfb &&
            !(dev_priv->num_implicit == 1 &&
              sou->base.active_implicit) &&
            dev_priv->implicit_fb != vfb) {
                DRM_ERROR("Multiple implicit framebuffers not supported.\n");
                return -EINVAL;
        }

        /* since they always map one to one these are safe */
        connector = &sou->base.connector;
        encoder = &sou->base.encoder;

        /* should we turn the crtc off */
        if (set->num_connectors == 0 || !set->mode || !set->fb) {

                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                /* the hardware has hung, don't do anything more */
                if (unlikely(ret != 0))
                        return ret;

                connector->encoder = NULL;
                encoder->crtc = NULL;
                crtc->primary->fb = NULL;
                crtc->x = 0;
                crtc->y = 0;
                crtc->enabled = false;

                vmw_kms_del_active(dev_priv, &sou->base);

                vmw_sou_backing_free(dev_priv, sou);

                return 0;
        }

        /* we now know we want to set a mode */
        mode = set->mode;
        fb = set->fb;

        if (set->x + mode->hdisplay > fb->width ||
            set->y + mode->vdisplay > fb->height) {
                DRM_ERROR("set outside of framebuffer\n");
                return -EINVAL;
        }

        vmw_svga_enable(dev_priv);

        if (mode->hdisplay != crtc->mode.hdisplay ||
            mode->vdisplay != crtc->mode.vdisplay) {
                /* no need to check if depth is different, because backing
                 * store depth is forced to 4 by the device.
                 */

                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                /* the hardware has hung, don't do anything more */
                if (unlikely(ret != 0))
                        return ret;

                vmw_sou_backing_free(dev_priv, sou);
        }

        if (!sou->buffer) {
                /* forced to depth 4 by the device */
                size_t size = mode->hdisplay * mode->vdisplay * 4;
                ret = vmw_sou_backing_alloc(dev_priv, sou, size);
                if (unlikely(ret != 0))
                        return ret;
        }

        ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
        if (unlikely(ret != 0)) {
                /*
                 * We are in a bit of a situation here: the hardware has
                 * hung and we may or may not have a buffer hanging off
                 * the screen object. The best thing to do is nothing if
                 * the screen was already defined; if not, just turn the
                 * crtc off. Not what userspace wants, but there is little
                 * else we can do here.
                 */
                if (sou->defined)
                        return ret;

                connector->encoder = NULL;
                encoder->crtc = NULL;
                crtc->primary->fb = NULL;
                crtc->x = 0;
                crtc->y = 0;
                crtc->enabled = false;

                return ret;
        }

        vmw_kms_add_active(dev_priv, &sou->base, vfb);

        connector->encoder = encoder;
        encoder->crtc = crtc;
        crtc->mode = *mode;
        crtc->primary->fb = fb;
        crtc->x = set->x;
        crtc->y = set->y;
        crtc->enabled = true;

        return 0;
}
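
/**
 * vmw_sou_crtc_page_flip - The screen object page_flip callback.
 *
 * Points the crtc at the new framebuffer and performs a full-screen
 * dirty update (dma-buffer or surface backed, as appropriate). If an
 * event was supplied, it is queued to be signaled on the returned fence.
 *
 * Returns 0 on success, negative error code on failure.
 */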
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
                                  struct drm_framebuffer *fb,
                                  struct drm_pending_vblank_event *event,
                                  uint32_t flags)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
        struct vmw_fence_obj *fence = NULL;
        struct drm_vmw_rect vclips;
        int ret;

        if (!vmw_kms_crtc_flippable(dev_priv, crtc))
                return -EINVAL;

        crtc->primary->fb = fb;

        /* do a full screen dirty update */
        vclips.x = crtc->x;
        vclips.y = crtc->y;
        vclips.w = crtc->mode.hdisplay;
        vclips.h = crtc->mode.vdisplay;

        if (vfb->dmabuf)
                ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
                                                  NULL, &vclips, 1, 1,
                                                  true, &fence);
        else
                ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
                                                   NULL, &vclips, NULL,
                                                   0, 0, 1, 1, &fence);

        if (ret != 0)
                goto out_no_fence;
        if (!fence) {
                ret = -EINVAL;
                goto out_no_fence;
        }

        if (event) {
                struct drm_file *file_priv = event->base.file_priv;

                ret = vmw_event_fence_action_queue(file_priv, fence,
                                                   &event->base,
                                                   &event->event.tv_sec,
                                                   &event->event.tv_usec,
                                                   true);
        }

        /*
         * No need to hold on to this now. The only cleanup
         * we need to do if we fail is unref the fence.
         */
        vmw_fence_obj_unreference(&fence);

        if (vmw_crtc_to_du(crtc)->is_implicit)
                vmw_kms_update_implicit_fb(dev_priv, crtc);

        return ret;

out_no_fence:
        crtc->primary->fb = old_fb;
        return ret;
}

static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
        .cursor_set2 = vmw_du_crtc_cursor_set2,
        .cursor_move = vmw_du_crtc_cursor_move,
        .gamma_set = vmw_du_crtc_gamma_set,
        .destroy = vmw_sou_crtc_destroy,
        .set_config = vmw_sou_crtc_set_config,
        .page_flip = vmw_sou_crtc_page_flip,
};

/*
 * Screen Object Display Unit encoder functions
 */

static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
        vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
        .destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */

static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
        vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static const struct drm_connector_funcs vmw_sou_connector_funcs = {
        .dpms = vmw_du_connector_dpms,
        .detect = vmw_du_connector_detect,
        .fill_modes = vmw_du_connector_fill_modes,
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_sou_connector_destroy,
};
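
/**
 * vmw_sou_init - Set up a single screen object display unit.
 *
 * @dev_priv: Pointer to the device private structure.
 * @unit: The unit number (screen object id) to set up.
 *
 * Allocates a struct vmw_screen_object_unit and initializes and registers
 * its connector, encoder and crtc with the DRM core, attaching the
 * connector properties used for layout hints.
 */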
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
        struct vmw_screen_object_unit *sou;
        struct drm_device *dev = dev_priv->dev;
        struct drm_connector *connector;
        struct drm_encoder *encoder;
        struct drm_crtc *crtc;

        sou = kzalloc(sizeof(*sou), GFP_KERNEL);
        if (!sou)
                return -ENOMEM;

        sou->base.unit = unit;
        crtc = &sou->base.crtc;
        encoder = &sou->base.encoder;
        connector = &sou->base.connector;
        sou->base.active_implicit = false;

        sou->base.pref_active = (unit == 0);
        sou->base.pref_width = dev_priv->initial_width;
        sou->base.pref_height = dev_priv->initial_height;
        sou->base.pref_mode = NULL;
        sou->base.is_implicit = false;

        drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
                           DRM_MODE_CONNECTOR_VIRTUAL);
        connector->status = vmw_du_connector_detect(connector, true);

        drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
                         DRM_MODE_ENCODER_VIRTUAL, NULL);
        drm_mode_connector_attach_encoder(connector, encoder);
        encoder->possible_crtcs = (1 << unit);
        encoder->possible_clones = 0;

        (void) drm_connector_register(connector);

        drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);

        drm_mode_crtc_set_gamma_size(crtc, 256);

        drm_object_attach_property(&connector->base,
                                   dev->mode_config.dirty_info_property,
                                   1);
        drm_object_attach_property(&connector->base,
                                   dev_priv->hotplug_mode_update_property, 1);
        drm_object_attach_property(&connector->base,
                                   dev->mode_config.suggested_x_property, 0);
        drm_object_attach_property(&connector->base,
                                   dev->mode_config.suggested_y_property, 0);
        if (dev_priv->implicit_placement_property)
                drm_object_attach_property
                        (&connector->base,
                         dev_priv->implicit_placement_property,
                         sou->base.is_implicit);
        return 0;
}
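
/**
 * vmw_kms_sou_init_display - Set up the screen object display system.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Checks for the SVGA_CAP_SCREEN_OBJECT_2 capability, initializes vblank
 * support and the connector properties, and creates one display unit per
 * supported output.
 *
 * Returns 0 on success, negative error code on failure.
 */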
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int i, ret;

        if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
                DRM_INFO("Not using screen objects,"
                         " missing cap SCREEN_OBJECT_2\n");
                return -ENOSYS;
        }

        ret = -ENOMEM;
        dev_priv->num_implicit = 0;
        dev_priv->implicit_fb = NULL;

        ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
        if (unlikely(ret != 0))
                return ret;

        ret = drm_mode_create_dirty_info_property(dev);
        if (unlikely(ret != 0))
                goto err_vblank_cleanup;

        vmw_kms_create_implicit_placement_property(dev_priv, false);

        for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
                vmw_sou_init(dev_priv, i);

        dev_priv->active_display_unit = vmw_du_screen_object;

        DRM_INFO("Screen Objects Display Unit initialized\n");

        return 0;

err_vblank_cleanup:
        drm_vblank_cleanup(dev);
        return ret;
}

int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;

        drm_vblank_cleanup(dev);

        return 0;
}
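
/**
 * do_dmabuf_define_gmrfb - Emit an SVGA_CMD_DEFINE_GMRFB command.
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: The dma-buffer backed framebuffer to use as the GMRFB.
 *
 * Points the device GMRFB, the source and destination of screen blits,
 * at the buffer backing @framebuffer.
 */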
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
                                  struct vmw_framebuffer *framebuffer)
{
        struct vmw_dma_buffer *buf =
                container_of(framebuffer, struct vmw_framebuffer_dmabuf,
                             base)->buffer;
        int depth = framebuffer->base.depth;
        struct {
                uint32_t header;
                SVGAFifoCmdDefineGMRFB body;
        } *cmd;

        /* Emulate RGBA support: contrary to svga_reg.h, this is not
         * supported by hosts. It is only a problem if we later read this
         * value back and expect what we originally uploaded.
         */
        if (depth == 32)
                depth = 24;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (!cmd) {
                DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
                return -ENOMEM;
        }

        cmd->header = SVGA_CMD_DEFINE_GMRFB;
        cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
        cmd->body.format.colorDepth = depth;
        cmd->body.format.reserved = 0;
        cmd->body.bytesPerLine = framebuffer->base.pitches[0];
        /* Buffer is reserved in vram or GMR */
        vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
 * blit surface to screen command.
 *
 * @dirty: The closure structure.
 *
 * Fills in the missing fields in the command, and translates the cliprects
 * to match the destination bounding box encoded.
 */
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_surface_dirty *sdirty =
                container_of(dirty, typeof(*sdirty), base);
        struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
        s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
        s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
        size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
        SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
        int i;

        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
        cmd->header.size = sizeof(cmd->body) + region_size;

        /*
         * Use the destination bounding box to specify destination - and
         * source bounding regions.
         */
        cmd->body.destRect.left = sdirty->left;
        cmd->body.destRect.right = sdirty->right;
        cmd->body.destRect.top = sdirty->top;
        cmd->body.destRect.bottom = sdirty->bottom;

        cmd->body.srcRect.left = sdirty->left + trans_x;
        cmd->body.srcRect.right = sdirty->right + trans_x;
        cmd->body.srcRect.top = sdirty->top + trans_y;
        cmd->body.srcRect.bottom = sdirty->bottom + trans_y;

        cmd->body.srcImage.sid = sdirty->sid;
        cmd->body.destScreenId = dirty->unit->unit;

        /* Blits are relative to the destination rect. Translate. */
        for (i = 0; i < dirty->num_hits; ++i, ++blit) {
                blit->left -= sdirty->left;
                blit->right -= sdirty->left;
                blit->top -= sdirty->top;
                blit->bottom -= sdirty->top;
        }

        vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));

        sdirty->left = sdirty->top = S32_MAX;
        sdirty->right = sdirty->bottom = S32_MIN;
}

/**
 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
 * BLIT_SURFACE_TO_SCREEN command.
 */
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_surface_dirty *sdirty =
                container_of(dirty, typeof(*sdirty), base);
        struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
        SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];

        /* Destination rect. */
        blit += dirty->num_hits;
        blit->left = dirty->unit_x1;
        blit->top = dirty->unit_y1;
        blit->right = dirty->unit_x2;
        blit->bottom = dirty->unit_y2;

        /* Destination bounding box */
        sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
        sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
        sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
        sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);

        dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @srf: Pointer to surface to blit from. If NULL, the surface attached
 * to @framebuffer will be used.
 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
 * @num_clips: Number of clip rects in @clips.
 * @inc: Increment to use when looping over @clips.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
                                 struct vmw_framebuffer *framebuffer,
                                 struct drm_clip_rect *clips,
                                 struct drm_vmw_rect *vclips,
                                 struct vmw_resource *srf,
                                 s32 dest_x,
                                 s32 dest_y,
                                 unsigned num_clips, int inc,
                                 struct vmw_fence_obj **out_fence)
{
        struct vmw_framebuffer_surface *vfbs =
                container_of(framebuffer, typeof(*vfbs), base);
        struct vmw_kms_sou_surface_dirty sdirty;
        int ret;

        if (!srf)
                srf = &vfbs->surface->res;

        ret = vmw_kms_helper_resource_prepare(srf, true);
        if (ret)
                return ret;

        sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
        sdirty.base.clip = vmw_sou_surface_clip;
        sdirty.base.dev_priv = dev_priv;
        sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
                sizeof(SVGASignedRect) * num_clips;

        sdirty.sid = srf->id;
        sdirty.left = sdirty.top = S32_MAX;
        sdirty.right = sdirty.bottom = S32_MIN;
        sdirty.dst_x = dest_x;
        sdirty.dst_y = dest_y;

        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   dest_x, dest_y, num_clips, inc,
                                   &sdirty.base);
        vmw_kms_helper_resource_finish(srf, out_fence);

        return ret;
}

/**
 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of dma-buffer
 * dirty clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of dirty clips.
 */
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        vmw_fifo_commit(dirty->dev_priv,
                        sizeof(struct vmw_kms_sou_dmabuf_blit) *
                        dirty->num_hits);
}

/**
 * vmw_sou_dmabuf_clip - Callback to encode a dma-buffer dirty cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;

        blit += dirty->num_hits;
        blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
        blit->body.destScreenId = dirty->unit->unit;
        blit->body.srcOrigin.x = dirty->fb_x;
        blit->body.srcOrigin.y = dirty->fb_y;
        blit->body.destRect.left = dirty->unit_x1;
        blit->body.destRect.top = dirty->unit_y1;
        blit->body.destRect.right = dirty->unit_x2;
        blit->body.destRect.bottom = dirty->unit_y2;
        dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 * @clips: Array of clip rects.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @num_clips: Number of clip rects in @clips.
 * @increment: Increment to use when looping over @clips.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
                                struct vmw_framebuffer *framebuffer,
                                struct drm_clip_rect *clips,
                                struct drm_vmw_rect *vclips,
                                unsigned num_clips, int increment,
                                bool interruptible,
                                struct vmw_fence_obj **out_fence)
{
        struct vmw_dma_buffer *buf =
                container_of(framebuffer, struct vmw_framebuffer_dmabuf,
                             base)->buffer;
        struct vmw_kms_dirty dirty;
        int ret;

        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
                                            false);
        if (ret)
                return ret;

        ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
        if (unlikely(ret != 0))
                goto out_revert;

        dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
        dirty.clip = vmw_sou_dmabuf_clip;
        dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   0, 0, num_clips, increment, &dirty);
        vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);

        return ret;

out_revert:
        vmw_kms_helper_buffer_revert(buf);

        return ret;
}

/**
 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        vmw_fifo_commit(dirty->dev_priv,
                        sizeof(struct vmw_kms_sou_readback_blit) *
                        dirty->num_hits);
}

/**
 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
 */
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_readback_blit *blit = dirty->cmd;

        blit += dirty->num_hits;
        blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
        blit->body.srcScreenId = dirty->unit->unit;
        blit->body.destOrigin.x = dirty->fb_x;
        blit->body.destOrigin.y = dirty->fb_y;
        blit->body.srcRect.left = dirty->unit_x1;
        blit->body.srcRect.top = dirty->unit_y1;
        blit->body.srcRect.right = dirty->unit_x2;
        blit->body.srcRect.bottom = dirty->unit_y2;
        dirty->num_hits++;
}

/**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                         struct drm_file *file_priv,
                         struct vmw_framebuffer *vfb,
                         struct drm_vmw_fence_rep __user *user_fence_rep,
                         struct drm_vmw_rect *vclips,
                         uint32_t num_clips)
{
        struct vmw_dma_buffer *buf =
                container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
        struct vmw_kms_dirty dirty;
        int ret;

        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
        if (ret)
                return ret;

        ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
        if (unlikely(ret != 0))
                goto out_revert;

        dirty.fifo_commit = vmw_sou_readback_fifo_commit;
        dirty.clip = vmw_sou_readback_clip;
        dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
                                   0, 0, num_clips, 1, &dirty);
        vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
                                     user_fence_rep);

        return ret;

out_revert:
        vmw_kms_helper_buffer_revert(buf);

        return ret;
}