/* vmwgfx_scrn.c — VMware vmwgfx Screen Object display unit implementation. */
  1. /**************************************************************************
  2. *
  3. * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. #include "vmwgfx_kms.h"
  28. #include <drm/drm_plane_helper.h>
  29. #define vmw_crtc_to_sou(x) \
  30. container_of(x, struct vmw_screen_object_unit, base.crtc)
  31. #define vmw_encoder_to_sou(x) \
  32. container_of(x, struct vmw_screen_object_unit, base.encoder)
  33. #define vmw_connector_to_sou(x) \
  34. container_of(x, struct vmw_screen_object_unit, base.connector)
  35. /**
  36. * struct vmw_kms_sou_surface_dirty - Closure structure for
  37. * blit surface to screen command.
  38. * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
  39. * @left: Left side of bounding box.
  40. * @right: Right side of bounding box.
  41. * @top: Top side of bounding box.
  42. * @bottom: Bottom side of bounding box.
  43. * @dst_x: Difference between source clip rects and framebuffer coordinates.
  44. * @dst_y: Difference between source clip rects and framebuffer coordinates.
  45. * @sid: Surface id of surface to copy from.
  46. */
  47. struct vmw_kms_sou_surface_dirty {
  48. struct vmw_kms_dirty base;
  49. s32 left, right, top, bottom;
  50. s32 dst_x, dst_y;
  51. u32 sid;
  52. };
  53. /*
  54. * SVGA commands that are used by this code. Please see the device headers
  55. * for explanation.
  56. */
  57. struct vmw_kms_sou_readback_blit {
  58. uint32 header;
  59. SVGAFifoCmdBlitScreenToGMRFB body;
  60. };
  61. struct vmw_kms_sou_dmabuf_blit {
  62. uint32 header;
  63. SVGAFifoCmdBlitGMRFBToScreen body;
  64. };
  65. struct vmw_kms_sou_dirty_cmd {
  66. SVGA3dCmdHeader header;
  67. SVGA3dCmdBlitSurfaceToScreen body;
  68. };
  69. /**
  70. * Display unit using screen objects.
  71. */
  72. struct vmw_screen_object_unit {
  73. struct vmw_display_unit base;
  74. unsigned long buffer_size; /**< Size of allocated buffer */
  75. struct vmw_dma_buffer *buffer; /**< Backing store buffer */
  76. bool defined;
  77. };
  78. static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
  79. {
  80. vmw_du_cleanup(&sou->base);
  81. kfree(sou);
  82. }
  83. /*
  84. * Screen Object Display Unit CRTC functions
  85. */
  86. static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
  87. {
  88. vmw_sou_destroy(vmw_crtc_to_sou(crtc));
  89. }
  90. /**
  91. * Send the fifo command to create a screen.
  92. */
  93. static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
  94. struct vmw_screen_object_unit *sou,
  95. uint32_t x, uint32_t y,
  96. struct drm_display_mode *mode)
  97. {
  98. size_t fifo_size;
  99. struct {
  100. struct {
  101. uint32_t cmdType;
  102. } header;
  103. SVGAScreenObject obj;
  104. } *cmd;
  105. BUG_ON(!sou->buffer);
  106. fifo_size = sizeof(*cmd);
  107. cmd = vmw_fifo_reserve(dev_priv, fifo_size);
  108. /* The hardware has hung, nothing we can do about it here. */
  109. if (unlikely(cmd == NULL)) {
  110. DRM_ERROR("Fifo reserve failed.\n");
  111. return -ENOMEM;
  112. }
  113. memset(cmd, 0, fifo_size);
  114. cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
  115. cmd->obj.structSize = sizeof(SVGAScreenObject);
  116. cmd->obj.id = sou->base.unit;
  117. cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
  118. (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
  119. cmd->obj.size.width = mode->hdisplay;
  120. cmd->obj.size.height = mode->vdisplay;
  121. if (sou->base.is_implicit) {
  122. cmd->obj.root.x = x;
  123. cmd->obj.root.y = y;
  124. } else {
  125. cmd->obj.root.x = sou->base.gui_x;
  126. cmd->obj.root.y = sou->base.gui_y;
  127. }
  128. sou->base.set_gui_x = cmd->obj.root.x;
  129. sou->base.set_gui_y = cmd->obj.root.y;
  130. /* Ok to assume that buffer is pinned in vram */
  131. vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
  132. cmd->obj.backingStore.pitch = mode->hdisplay * 4;
  133. vmw_fifo_commit(dev_priv, fifo_size);
  134. sou->defined = true;
  135. return 0;
  136. }
  137. /**
  138. * Send the fifo command to destroy a screen.
  139. */
  140. static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
  141. struct vmw_screen_object_unit *sou)
  142. {
  143. size_t fifo_size;
  144. int ret;
  145. struct {
  146. struct {
  147. uint32_t cmdType;
  148. } header;
  149. SVGAFifoCmdDestroyScreen body;
  150. } *cmd;
  151. /* no need to do anything */
  152. if (unlikely(!sou->defined))
  153. return 0;
  154. fifo_size = sizeof(*cmd);
  155. cmd = vmw_fifo_reserve(dev_priv, fifo_size);
  156. /* the hardware has hung, nothing we can do about it here */
  157. if (unlikely(cmd == NULL)) {
  158. DRM_ERROR("Fifo reserve failed.\n");
  159. return -ENOMEM;
  160. }
  161. memset(cmd, 0, fifo_size);
  162. cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
  163. cmd->body.screenId = sou->base.unit;
  164. vmw_fifo_commit(dev_priv, fifo_size);
  165. /* Force sync */
  166. ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
  167. if (unlikely(ret != 0))
  168. DRM_ERROR("Failed to sync with HW");
  169. else
  170. sou->defined = false;
  171. return ret;
  172. }
  173. /**
  174. * Free the backing store.
  175. */
  176. static void vmw_sou_backing_free(struct vmw_private *dev_priv,
  177. struct vmw_screen_object_unit *sou)
  178. {
  179. vmw_dmabuf_unreference(&sou->buffer);
  180. sou->buffer_size = 0;
  181. }
  182. /**
  183. * Allocate the backing store for the buffer.
  184. */
  185. static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
  186. struct vmw_screen_object_unit *sou,
  187. unsigned long size)
  188. {
  189. int ret;
  190. if (sou->buffer_size == size)
  191. return 0;
  192. if (sou->buffer)
  193. vmw_sou_backing_free(dev_priv, sou);
  194. sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
  195. if (unlikely(sou->buffer == NULL))
  196. return -ENOMEM;
  197. /* After we have alloced the backing store might not be able to
  198. * resume the overlays, this is preferred to failing to alloc.
  199. */
  200. vmw_overlay_pause_all(dev_priv);
  201. ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
  202. &vmw_vram_ne_placement,
  203. false, &vmw_dmabuf_bo_free);
  204. vmw_overlay_resume_all(dev_priv);
  205. if (unlikely(ret != 0))
  206. sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
  207. else
  208. sou->buffer_size = size;
  209. return ret;
  210. }
/**
 * vmw_sou_crtc_set_config - DRM set_config callback for screen objects.
 * @set: The requested mode-set configuration.
 *
 * Validates the request, tears down the screen object when disabling,
 * otherwise (re)allocates the backing store and (re)defines the screen
 * object on the device. At most one connector per CRTC is supported, and
 * only a single active implicit framebuffer at a time.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	struct drm_connector *connector;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_crtc *crtc;
	int ret = 0;

	if (!set)
		return -EINVAL;

	if (!set->crtc)
		return -EINVAL;

	/* get the sou */
	crtc = set->crtc;
	sou = vmw_crtc_to_sou(crtc);
	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
	dev_priv = vmw_priv(crtc->dev);

	/* Each screen-object CRTC maps to exactly one connector. */
	if (set->num_connectors > 1) {
		DRM_ERROR("Too many connectors\n");
		return -EINVAL;
	}

	if (set->num_connectors == 1 &&
	    set->connectors[0] != &sou->base.connector) {
		DRM_ERROR("Connector doesn't match %p %p\n",
			set->connectors[0], &sou->base.connector);
		return -EINVAL;
	}

	/* Only one active implicit frame-buffer at a time. */
	mutex_lock(&dev_priv->global_kms_state_mutex);
	if (sou->base.is_implicit &&
	    dev_priv->implicit_fb && vfb &&
	    !(dev_priv->num_implicit == 1 &&
	      sou->base.active_implicit) &&
	    dev_priv->implicit_fb != vfb) {
		mutex_unlock(&dev_priv->global_kms_state_mutex);
		DRM_ERROR("Multiple implicit framebuffers not supported.\n");
		return -EINVAL;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);

	/* since they always map one to one these are safe */
	connector = &sou->base.connector;
	encoder = &sou->base.encoder;

	/* should we turn the crtc off */
	if (set->num_connectors == 0 || !set->mode || !set->fb) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		/* the hardware has hung don't do anything more */
		if (unlikely(ret != 0))
			return ret;

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->primary->fb = NULL;
		crtc->x = 0;
		crtc->y = 0;
		crtc->enabled = false;

		vmw_kms_del_active(dev_priv, &sou->base);

		vmw_sou_backing_free(dev_priv, sou);

		return 0;
	}

	/* we now know we want to set a mode */
	mode = set->mode;
	fb = set->fb;

	/* The scanout region must lie entirely within the framebuffer. */
	if (set->x + mode->hdisplay > fb->width ||
	    set->y + mode->vdisplay > fb->height) {
		DRM_ERROR("set outside of framebuffer\n");
		return -EINVAL;
	}

	vmw_svga_enable(dev_priv);

	/* A size change requires destroying and redefining the screen. */
	if (mode->hdisplay != crtc->mode.hdisplay ||
	    mode->vdisplay != crtc->mode.vdisplay) {
		/* no need to check if depth is different, because backing
		 * store depth is forced to 4 by the device.
		 */

		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		/* the hardware has hung don't do anything more */
		if (unlikely(ret != 0))
			return ret;

		vmw_sou_backing_free(dev_priv, sou);
	}

	if (!sou->buffer) {
		/* forced to depth 4 by the device */
		size_t size = mode->hdisplay * mode->vdisplay * 4;
		ret = vmw_sou_backing_alloc(dev_priv, sou, size);
		if (unlikely(ret != 0))
			return ret;
	}

	ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
	if (unlikely(ret != 0)) {
		/*
		 * We are in a bit of a situation here, the hardware has
		 * hung and we may or may not have a buffer hanging of
		 * the screen object, best thing to do is not do anything
		 * if we where defined, if not just turn the crtc of.
		 * Not what userspace wants but it needs to htfu.
		 */
		if (sou->defined)
			return ret;

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->primary->fb = NULL;
		crtc->x = 0;
		crtc->y = 0;
		crtc->enabled = false;

		return ret;
	}

	vmw_kms_add_active(dev_priv, &sou->base, vfb);

	connector->encoder = encoder;
	encoder->crtc = crtc;
	crtc->mode = *mode;
	crtc->primary->fb = fb;
	crtc->x = set->x;
	crtc->y = set->y;
	crtc->enabled = true;

	return 0;
}
/**
 * vmw_sou_crtc_page_flip - DRM page_flip callback for screen objects.
 * @crtc: The CRTC to flip.
 * @fb: The new framebuffer to scan out.
 * @event: Optional vblank event to signal when the flip has executed.
 * @flags: DRM page-flip flags (unused here).
 *
 * Implements a flip as a full-screen dirty update from the new
 * framebuffer, fencing the operation. If @event is non-NULL it is queued
 * on the fence so userspace is signalled when the flip completes.
 *
 * Returns 0 on success, negative error code on failure; on failure the
 * old framebuffer is restored.
 */
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
				  struct drm_framebuffer *fb,
				  struct drm_pending_vblank_event *event,
				  uint32_t flags)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
	struct vmw_fence_obj *fence = NULL;
	struct drm_vmw_rect vclips;
	int ret;

	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
		return -EINVAL;

	crtc->primary->fb = fb;

	/* do a full screen dirty update */
	vclips.x = crtc->x;
	vclips.y = crtc->y;
	vclips.w = crtc->mode.hdisplay;
	vclips.h = crtc->mode.vdisplay;

	/* Dma-buf backed fbs and surface backed fbs take different paths. */
	if (vfb->dmabuf)
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
						  NULL, &vclips, 1, 1,
						  true, &fence);
	else
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
						   NULL, &vclips, NULL,
						   0, 0, 1, 1, &fence);


	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	if (event) {
		struct drm_file *file_priv = event->base.file_priv;

		/*
		 * NOTE(review): a failure here leaves ret non-zero but
		 * execution continues and the fb is not reverted - confirm
		 * this best-effort behavior is intended.
		 */
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   true);
	}

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_kms_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	crtc->primary->fb = old_fb;
	return ret;
}
/* CRTC vtable for screen-object display units. */
static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.cursor_set2 = vmw_du_crtc_cursor_set2,
	.cursor_move = vmw_du_crtc_cursor_move,
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_sou_crtc_destroy,
	.set_config = vmw_sou_crtc_set_config,
	.page_flip = vmw_sou_crtc_page_flip,
};
  389. /*
  390. * Screen Object Display Unit encoder functions
  391. */
  392. static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
  393. {
  394. vmw_sou_destroy(vmw_encoder_to_sou(encoder));
  395. }
/* Encoder vtable for screen-object display units. */
static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
	.destroy = vmw_sou_encoder_destroy,
};
  399. /*
  400. * Screen Object Display Unit connector functions
  401. */
  402. static void vmw_sou_connector_destroy(struct drm_connector *connector)
  403. {
  404. vmw_sou_destroy(vmw_connector_to_sou(connector));
  405. }
/* Connector vtable for screen-object display units. */
static const struct drm_connector_funcs vmw_sou_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_sou_connector_destroy,
};
/**
 * vmw_sou_init - Create and register one screen-object display unit.
 * @dev_priv: Pointer to the device private structure.
 * @unit: Index of the display unit; unit 0 is preferred-active.
 *
 * Allocates a vmw_screen_object_unit and initializes its connector,
 * encoder and CRTC with the screen-object vtables, attaching the
 * hotplug/suggested-position (and optionally implicit-placement)
 * properties.
 *
 * Returns 0 on success, -ENOMEM if the unit cannot be allocated.
 */
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_screen_object_unit *sou;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
	if (!sou)
		return -ENOMEM;

	sou->base.unit = unit;
	crtc = &sou->base.crtc;
	encoder = &sou->base.encoder;
	connector = &sou->base.connector;

	sou->base.active_implicit = false;
	sou->base.pref_active = (unit == 0);
	sou->base.pref_width = dev_priv->initial_width;
	sou->base.pref_height = dev_priv->initial_height;
	sou->base.pref_mode = NULL;
	sou->base.is_implicit = false;

	/*
	 * NOTE(review): the return values of drm_connector_init,
	 * drm_encoder_init and drm_crtc_init are ignored - confirm
	 * failure here is acceptable or add unwind paths.
	 */
	drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	connector->status = vmw_du_connector_detect(connector, true);

	drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
			 DRM_MODE_ENCODER_VIRTUAL, NULL);
	drm_mode_connector_attach_encoder(connector, encoder);
	/* Each encoder drives exactly its own CRTC; no cloning. */
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	(void) drm_connector_register(connector);

	drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);

	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_object_attach_property(&connector->base,
				   dev_priv->hotplug_mode_update_property, 1);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);
	if (dev_priv->implicit_placement_property)
		drm_object_attach_property
			(&connector->base,
			 dev_priv->implicit_placement_property,
			 sou->base.is_implicit);

	return 0;
}
  457. int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
  458. {
  459. struct drm_device *dev = dev_priv->dev;
  460. int i, ret;
  461. if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
  462. DRM_INFO("Not using screen objects,"
  463. " missing cap SCREEN_OBJECT_2\n");
  464. return -ENOSYS;
  465. }
  466. ret = -ENOMEM;
  467. dev_priv->num_implicit = 0;
  468. dev_priv->implicit_fb = NULL;
  469. ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
  470. if (unlikely(ret != 0))
  471. return ret;
  472. vmw_kms_create_implicit_placement_property(dev_priv, false);
  473. for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
  474. vmw_sou_init(dev_priv, i);
  475. dev_priv->active_display_unit = vmw_du_screen_object;
  476. DRM_INFO("Screen Objects Display Unit initialized\n");
  477. return 0;
  478. }
  479. int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
  480. {
  481. struct drm_device *dev = dev_priv->dev;
  482. drm_vblank_cleanup(dev);
  483. return 0;
  484. }
/**
 * do_dmabuf_define_gmrfb - Define the GMRFB from a dma-buf framebuffer.
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: The dma-buffer backed framebuffer to point the GMRFB at.
 *
 * Emits an SVGA_CMD_DEFINE_GMRFB command describing the framebuffer's
 * format, pitch and backing buffer, so subsequent GMRFB blit commands
 * operate on it.
 *
 * Returns 0 on success, -ENOMEM if the fifo reservation fails.
 */
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	int depth = framebuffer->base.format->depth;
	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	/* Emulate RGBA support, contrary to svga_reg.h this is not
	 * supported by hosts. This is only a problem if we are reading
	 * this value later and expecting what we uploaded back.
	 */
	if (depth == 32)
		depth = 24;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (!cmd) {
		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
		return -ENOMEM;
	}

	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	/* Buffer is reserved in vram or GMR */
	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
  517. /**
  518. * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
  519. * blit surface to screen command.
  520. *
  521. * @dirty: The closure structure.
  522. *
  523. * Fills in the missing fields in the command, and translates the cliprects
  524. * to match the destination bounding box encoded.
  525. */
  526. static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
  527. {
  528. struct vmw_kms_sou_surface_dirty *sdirty =
  529. container_of(dirty, typeof(*sdirty), base);
  530. struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
  531. s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
  532. s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
  533. size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
  534. SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
  535. int i;
  536. if (!dirty->num_hits) {
  537. vmw_fifo_commit(dirty->dev_priv, 0);
  538. return;
  539. }
  540. cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
  541. cmd->header.size = sizeof(cmd->body) + region_size;
  542. /*
  543. * Use the destination bounding box to specify destination - and
  544. * source bounding regions.
  545. */
  546. cmd->body.destRect.left = sdirty->left;
  547. cmd->body.destRect.right = sdirty->right;
  548. cmd->body.destRect.top = sdirty->top;
  549. cmd->body.destRect.bottom = sdirty->bottom;
  550. cmd->body.srcRect.left = sdirty->left + trans_x;
  551. cmd->body.srcRect.right = sdirty->right + trans_x;
  552. cmd->body.srcRect.top = sdirty->top + trans_y;
  553. cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
  554. cmd->body.srcImage.sid = sdirty->sid;
  555. cmd->body.destScreenId = dirty->unit->unit;
  556. /* Blits are relative to the destination rect. Translate. */
  557. for (i = 0; i < dirty->num_hits; ++i, ++blit) {
  558. blit->left -= sdirty->left;
  559. blit->right -= sdirty->left;
  560. blit->top -= sdirty->top;
  561. blit->bottom -= sdirty->top;
  562. }
  563. vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
  564. sdirty->left = sdirty->top = S32_MAX;
  565. sdirty->right = sdirty->bottom = S32_MIN;
  566. }
  567. /**
  568. * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
  569. *
  570. * @dirty: The closure structure
  571. *
  572. * Encodes a SVGASignedRect cliprect and updates the bounding box of the
  573. * BLIT_SURFACE_TO_SCREEN command.
  574. */
  575. static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
  576. {
  577. struct vmw_kms_sou_surface_dirty *sdirty =
  578. container_of(dirty, typeof(*sdirty), base);
  579. struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
  580. SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
  581. /* Destination rect. */
  582. blit += dirty->num_hits;
  583. blit->left = dirty->unit_x1;
  584. blit->top = dirty->unit_y1;
  585. blit->right = dirty->unit_x2;
  586. blit->bottom = dirty->unit_y2;
  587. /* Destination bounding box */
  588. sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
  589. sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
  590. sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
  591. sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
  592. dirty->num_hits++;
  593. }
  594. /**
  595. * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
  596. *
  597. * @dev_priv: Pointer to the device private structure.
  598. * @framebuffer: Pointer to the surface-buffer backed framebuffer.
  599. * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
  600. * @vclips: Alternate array of clip rects. Either @clips or @vclips must
  601. * be NULL.
  602. * @srf: Pointer to surface to blit from. If NULL, the surface attached
  603. * to @framebuffer will be used.
  604. * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
  605. * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
  606. * @num_clips: Number of clip rects in @clips.
  607. * @inc: Increment to use when looping over @clips.
  608. * @out_fence: If non-NULL, will return a ref-counted pointer to a
  609. * struct vmw_fence_obj. The returned fence pointer may be NULL in which
  610. * case the device has already synchronized.
  611. *
  612. * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
  613. * interrupted.
  614. */
  615. int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
  616. struct vmw_framebuffer *framebuffer,
  617. struct drm_clip_rect *clips,
  618. struct drm_vmw_rect *vclips,
  619. struct vmw_resource *srf,
  620. s32 dest_x,
  621. s32 dest_y,
  622. unsigned num_clips, int inc,
  623. struct vmw_fence_obj **out_fence)
  624. {
  625. struct vmw_framebuffer_surface *vfbs =
  626. container_of(framebuffer, typeof(*vfbs), base);
  627. struct vmw_kms_sou_surface_dirty sdirty;
  628. int ret;
  629. if (!srf)
  630. srf = &vfbs->surface->res;
  631. ret = vmw_kms_helper_resource_prepare(srf, true);
  632. if (ret)
  633. return ret;
  634. sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
  635. sdirty.base.clip = vmw_sou_surface_clip;
  636. sdirty.base.dev_priv = dev_priv;
  637. sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
  638. sizeof(SVGASignedRect) * num_clips;
  639. sdirty.sid = srf->id;
  640. sdirty.left = sdirty.top = S32_MAX;
  641. sdirty.right = sdirty.bottom = S32_MIN;
  642. sdirty.dst_x = dest_x;
  643. sdirty.dst_y = dest_y;
  644. ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
  645. dest_x, dest_y, num_clips, inc,
  646. &sdirty.base);
  647. vmw_kms_helper_resource_finish(srf, out_fence);
  648. return ret;
  649. }
  650. /**
  651. * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
  652. *
  653. * @dirty: The closure structure.
  654. *
  655. * Commits a previously built command buffer of readback clips.
  656. */
  657. static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
  658. {
  659. if (!dirty->num_hits) {
  660. vmw_fifo_commit(dirty->dev_priv, 0);
  661. return;
  662. }
  663. vmw_fifo_commit(dirty->dev_priv,
  664. sizeof(struct vmw_kms_sou_dmabuf_blit) *
  665. dirty->num_hits);
  666. }
  667. /**
  668. * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
  669. *
  670. * @dirty: The closure structure
  671. *
  672. * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
  673. */
  674. static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
  675. {
  676. struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
  677. blit += dirty->num_hits;
  678. blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
  679. blit->body.destScreenId = dirty->unit->unit;
  680. blit->body.srcOrigin.x = dirty->fb_x;
  681. blit->body.srcOrigin.y = dirty->fb_y;
  682. blit->body.destRect.left = dirty->unit_x1;
  683. blit->body.destRect.top = dirty->unit_y1;
  684. blit->body.destRect.right = dirty->unit_x2;
  685. blit->body.destRect.bottom = dirty->unit_y2;
  686. dirty->num_hits++;
  687. }
  688. /**
  689. * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
  690. *
  691. * @dev_priv: Pointer to the device private structure.
  692. * @framebuffer: Pointer to the dma-buffer backed framebuffer.
  693. * @clips: Array of clip rects.
  694. * @vclips: Alternate array of clip rects. Either @clips or @vclips must
  695. * be NULL.
  696. * @num_clips: Number of clip rects in @clips.
  697. * @increment: Increment to use when looping over @clips.
  698. * @interruptible: Whether to perform waits interruptible if possible.
  699. * @out_fence: If non-NULL, will return a ref-counted pointer to a
  700. * struct vmw_fence_obj. The returned fence pointer may be NULL in which
  701. * case the device has already synchronized.
  702. *
  703. * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
  704. * interrupted.
  705. */
  706. int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
  707. struct vmw_framebuffer *framebuffer,
  708. struct drm_clip_rect *clips,
  709. struct drm_vmw_rect *vclips,
  710. unsigned num_clips, int increment,
  711. bool interruptible,
  712. struct vmw_fence_obj **out_fence)
  713. {
  714. struct vmw_dma_buffer *buf =
  715. container_of(framebuffer, struct vmw_framebuffer_dmabuf,
  716. base)->buffer;
  717. struct vmw_kms_dirty dirty;
  718. int ret;
  719. ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
  720. false);
  721. if (ret)
  722. return ret;
  723. ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
  724. if (unlikely(ret != 0))
  725. goto out_revert;
  726. dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
  727. dirty.clip = vmw_sou_dmabuf_clip;
  728. dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
  729. num_clips;
  730. ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
  731. 0, 0, num_clips, increment, &dirty);
  732. vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
  733. return ret;
  734. out_revert:
  735. vmw_kms_helper_buffer_revert(buf);
  736. return ret;
  737. }
  738. /**
  739. * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
  740. *
  741. * @dirty: The closure structure.
  742. *
  743. * Commits a previously built command buffer of readback clips.
  744. */
  745. static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
  746. {
  747. if (!dirty->num_hits) {
  748. vmw_fifo_commit(dirty->dev_priv, 0);
  749. return;
  750. }
  751. vmw_fifo_commit(dirty->dev_priv,
  752. sizeof(struct vmw_kms_sou_readback_blit) *
  753. dirty->num_hits);
  754. }
  755. /**
  756. * vmw_sou_readback_clip - Callback to encode a readback cliprect.
  757. *
  758. * @dirty: The closure structure
  759. *
  760. * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
  761. */
  762. static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
  763. {
  764. struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
  765. blit += dirty->num_hits;
  766. blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
  767. blit->body.srcScreenId = dirty->unit->unit;
  768. blit->body.destOrigin.x = dirty->fb_x;
  769. blit->body.destOrigin.y = dirty->fb_y;
  770. blit->body.srcRect.left = dirty->unit_x1;
  771. blit->body.srcRect.top = dirty->unit_y1;
  772. blit->body.srcRect.right = dirty->unit_x2;
  773. blit->body.srcRect.bottom = dirty->unit_y2;
  774. dirty->num_hits++;
  775. }
  776. /**
  777. * vmw_kms_sou_readback - Perform a readback from the screen object system to
  778. * a dma-buffer backed framebuffer.
  779. *
  780. * @dev_priv: Pointer to the device private structure.
  781. * @file_priv: Pointer to a struct drm_file identifying the caller.
  782. * Must be set to NULL if @user_fence_rep is NULL.
  783. * @vfb: Pointer to the dma-buffer backed framebuffer.
  784. * @user_fence_rep: User-space provided structure for fence information.
  785. * Must be set to non-NULL if @file_priv is non-NULL.
  786. * @vclips: Array of clip rects.
  787. * @num_clips: Number of clip rects in @vclips.
  788. *
  789. * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
  790. * interrupted.
  791. */
  792. int vmw_kms_sou_readback(struct vmw_private *dev_priv,
  793. struct drm_file *file_priv,
  794. struct vmw_framebuffer *vfb,
  795. struct drm_vmw_fence_rep __user *user_fence_rep,
  796. struct drm_vmw_rect *vclips,
  797. uint32_t num_clips)
  798. {
  799. struct vmw_dma_buffer *buf =
  800. container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
  801. struct vmw_kms_dirty dirty;
  802. int ret;
  803. ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
  804. if (ret)
  805. return ret;
  806. ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
  807. if (unlikely(ret != 0))
  808. goto out_revert;
  809. dirty.fifo_commit = vmw_sou_readback_fifo_commit;
  810. dirty.clip = vmw_sou_readback_clip;
  811. dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
  812. num_clips;
  813. ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
  814. 0, 0, num_clips, 1, &dirty);
  815. vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
  816. user_fence_rep);
  817. return ret;
  818. out_revert:
  819. vmw_kms_helper_buffer_revert(buf);
  820. return ret;
  821. }