vmwgfx_scrn.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#define vmw_crtc_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
        container_of(x, struct vmw_screen_object_unit, base.connector)

/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference between source clip rects and framebuffer coordinates.
 * @dst_y: Difference between source clip rects and framebuffer coordinates.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
        struct vmw_kms_dirty base;
        s32 left, right, top, bottom;
        s32 dst_x, dst_y;
        u32 sid;
};

/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
        uint32 header;
        SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_bo_blit {
        uint32 header;
        SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
        SVGA3dCmdHeader header;
        SVGA3dCmdBlitSurfaceToScreen body;
};

/**
 * struct vmw_screen_object_unit - Display unit using screen objects.
 */
struct vmw_screen_object_unit {
        struct vmw_display_unit base;

        unsigned long buffer_size; /**< Size of allocated buffer */
        struct vmw_buffer_object *buffer; /**< Backing store buffer */

        bool defined;
};
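
/**
 * vmw_sou_destroy - Clean up and free a screen object display unit.
 *
 * @sou: The screen object unit to destroy.
 *
 * Releases the base display unit resources and frees @sou itself.
 */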
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
        vmw_du_cleanup(&sou->base);
        kfree(sou);
}

/*
 * Screen Object Display Unit CRTC functions
 */

static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
        vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

/**
 * vmw_sou_fifo_create - Send the fifo command to create a screen.
 *
 * @dev_priv: Pointer to the device private structure.
 * @sou: The screen object unit to define in the device.
 * @x: X position of the screen in the GUI coordinate space.
 * @y: Y position of the screen in the GUI coordinate space.
 * @mode: Display mode providing the screen dimensions.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
                               struct vmw_screen_object_unit *sou,
                               int x, int y,
                               struct drm_display_mode *mode)
{
        size_t fifo_size;

        struct {
                struct {
                        uint32_t cmdType;
                } header;
                SVGAScreenObject obj;
        } *cmd;

        BUG_ON(!sou->buffer);

        fifo_size = sizeof(*cmd);
        cmd = vmw_fifo_reserve(dev_priv, fifo_size);
        /* The hardware has hung, nothing we can do about it here. */
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, fifo_size);
        cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
        cmd->obj.structSize = sizeof(SVGAScreenObject);
        cmd->obj.id = sou->base.unit;
        cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
                (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
        cmd->obj.size.width = mode->hdisplay;
        cmd->obj.size.height = mode->vdisplay;
        cmd->obj.root.x = x;
        cmd->obj.root.y = y;
        sou->base.set_gui_x = cmd->obj.root.x;
        sou->base.set_gui_y = cmd->obj.root.y;

        /* Ok to assume that buffer is pinned in vram */
        vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
        cmd->obj.backingStore.pitch = mode->hdisplay * 4;

        vmw_fifo_commit(dev_priv, fifo_size);

        sou->defined = true;

        return 0;
}

/**
 * vmw_sou_fifo_destroy - Send the fifo command to destroy a screen.
 *
 * @dev_priv: Pointer to the device private structure.
 * @sou: The screen object unit whose screen is to be destroyed.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
                                struct vmw_screen_object_unit *sou)
{
        size_t fifo_size;
        int ret;

        struct {
                struct {
                        uint32_t cmdType;
                } header;
                SVGAFifoCmdDestroyScreen body;
        } *cmd;

        /* No need to do anything if the screen was never defined. */
        if (unlikely(!sou->defined))
                return 0;

        fifo_size = sizeof(*cmd);
        cmd = vmw_fifo_reserve(dev_priv, fifo_size);
        /* The hardware has hung, nothing we can do about it here. */
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, fifo_size);
        cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
        cmd->body.screenId = sou->base.unit;

        vmw_fifo_commit(dev_priv, fifo_size);

        /* Force sync */
        ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
        if (unlikely(ret != 0))
                DRM_ERROR("Failed to sync with HW");
        else
                sou->defined = false;

        return ret;
}

/**
 * vmw_sou_crtc_mode_set_nofb - Create new screen
 *
 * @crtc: CRTC associated with the new screen
 *
 * This function creates or destroys a screen. It cannot fail, so if we
 * somehow run into a failure we just do the best we can to get out.
 */
static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
        struct vmw_private *dev_priv;
        struct vmw_screen_object_unit *sou;
        struct vmw_framebuffer *vfb;
        struct drm_framebuffer *fb;
        struct drm_plane_state *ps;
        struct vmw_plane_state *vps;
        int ret;

        sou = vmw_crtc_to_sou(crtc);
        dev_priv = vmw_priv(crtc->dev);
        ps = crtc->primary->state;
        fb = ps->fb;
        vps = vmw_plane_state_to_vps(ps);

        vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;

        if (sou->defined) {
                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                if (ret) {
                        DRM_ERROR("Failed to destroy Screen Object\n");
                        return;
                }
        }

        if (vfb) {
                struct drm_connector_state *conn_state;
                struct vmw_connector_state *vmw_conn_state;
                int x, y;

                sou->buffer = vps->bo;
                sou->buffer_size = vps->bo_size;

                if (sou->base.is_implicit) {
                        x = crtc->x;
                        y = crtc->y;
                } else {
                        conn_state = sou->base.connector.state;
                        vmw_conn_state = vmw_connector_state_to_vcs(conn_state);

                        x = vmw_conn_state->gui_x;
                        y = vmw_conn_state->gui_y;
                }

                ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
                if (ret)
                        DRM_ERROR("Failed to define Screen Object %dx%d\n",
                                  crtc->x, crtc->y);

                vmw_kms_add_active(dev_priv, &sou->base, vfb);
        } else {
                sou->buffer = NULL;
                sou->buffer_size = 0;

                vmw_kms_del_active(dev_priv, &sou->base);
        }
}

/**
 * vmw_sou_crtc_helper_prepare - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * Prepares the CRTC for a mode set, but we don't need to do anything here.
 */
static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
{
}

/**
 * vmw_sou_crtc_atomic_enable - Noop
 *
 * @crtc: CRTC associated with the new screen
 * @old_state: Unused
 *
 * This is called after a mode set has been completed.
 */
static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
                                       struct drm_crtc_state *old_state)
{
}

/**
 * vmw_sou_crtc_atomic_disable - Turns off CRTC
 *
 * @crtc: CRTC to be turned off
 * @old_state: Unused
 */
static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
                                        struct drm_crtc_state *old_state)
{
        struct vmw_private *dev_priv;
        struct vmw_screen_object_unit *sou;
        int ret;

        if (!crtc) {
                DRM_ERROR("CRTC is NULL\n");
                return;
        }

        sou = vmw_crtc_to_sou(crtc);
        dev_priv = vmw_priv(crtc->dev);

        if (sou->defined) {
                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                if (ret)
                        DRM_ERROR("Failed to destroy Screen Object\n");
        }
}
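
/**
 * vmw_sou_crtc_page_flip - Queue a page flip on a screen object crtc.
 *
 * @crtc: The crtc to flip.
 * @new_fb: The framebuffer to flip to.
 * @event: Optional vblank event to signal when the flip completes.
 * @flags: DRM_MODE_PAGE_FLIP_* flags.
 * @ctx: The modeset acquire context.
 *
 * Rejects the flip if the crtc is not currently flippable, otherwise hands
 * the request to the atomic helper and, for implicit placement, updates the
 * implicit framebuffer.
 */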
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
                                  struct drm_framebuffer *new_fb,
                                  struct drm_pending_vblank_event *event,
                                  uint32_t flags,
                                  struct drm_modeset_acquire_ctx *ctx)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        int ret;

        if (!vmw_kms_crtc_flippable(dev_priv, crtc))
                return -EINVAL;

        ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
        if (ret) {
                DRM_ERROR("Page flip error %d.\n", ret);
                return ret;
        }

        if (vmw_crtc_to_du(crtc)->is_implicit)
                vmw_kms_update_implicit_fb(dev_priv, crtc);

        return ret;
}

static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
        .gamma_set = vmw_du_crtc_gamma_set,
        .destroy = vmw_sou_crtc_destroy,
        .reset = vmw_du_crtc_reset,
        .atomic_duplicate_state = vmw_du_crtc_duplicate_state,
        .atomic_destroy_state = vmw_du_crtc_destroy_state,
        .set_config = vmw_kms_set_config,
        .page_flip = vmw_sou_crtc_page_flip,
};

/*
 * Screen Object Display Unit encoder functions
 */

static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
        vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
        .destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */

static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
        vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static const struct drm_connector_funcs vmw_sou_connector_funcs = {
        .dpms = vmw_du_connector_dpms,
        .detect = vmw_du_connector_detect,
        .fill_modes = vmw_du_connector_fill_modes,
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_sou_connector_destroy,
        .reset = vmw_du_connector_reset,
        .atomic_duplicate_state = vmw_du_connector_duplicate_state,
        .atomic_destroy_state = vmw_du_connector_destroy_state,
        .atomic_set_property = vmw_du_connector_atomic_set_property,
        .atomic_get_property = vmw_du_connector_atomic_get_property,
};

static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
        .best_encoder = drm_atomic_helper_best_encoder,
};

/*
 * Screen Object Display Plane Functions
 */

/**
 * vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the backing buffer and drops the plane's reference to it.
 */
static void
vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
                                 struct drm_plane_state *old_state)
{
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
        struct drm_crtc *crtc = plane->state->crtc ?
                plane->state->crtc : old_state->crtc;

        if (vps->bo)
                vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
        vmw_bo_unreference(&vps->bo);
        vps->bo_size = 0;

        vmw_du_plane_cleanup_fb(plane, old_state);
}

/**
 * vmw_sou_primary_plane_prepare_fb - allocate backing buffer
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * The SOU backing buffer is our equivalent of the display plane.
 *
 * Returns 0 on success
 */
static int
vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
                                 struct drm_plane_state *new_state)
{
        struct drm_framebuffer *new_fb = new_state->fb;
        struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
        struct vmw_private *dev_priv;
        size_t size;
        int ret;

        if (!new_fb) {
                vmw_bo_unreference(&vps->bo);
                vps->bo_size = 0;

                return 0;
        }

        size = new_state->crtc_w * new_state->crtc_h * 4;
        dev_priv = vmw_priv(crtc->dev);

        if (vps->bo) {
                if (vps->bo_size == size) {
                        /*
                         * Note that this might temporarily up the pin-count
                         * to 2, until cleanup_fb() is called.
                         */
                        return vmw_bo_pin_in_vram(dev_priv, vps->bo,
                                                  true);
                }

                vmw_bo_unreference(&vps->bo);
                vps->bo_size = 0;
        }

        vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
        if (!vps->bo)
                return -ENOMEM;

        vmw_svga_enable(dev_priv);

        /*
         * After we have allocated the backing store, we might not be able to
         * resume the overlays; this is preferred over failing the allocation.
         */
        vmw_overlay_pause_all(dev_priv);
        ret = vmw_bo_init(dev_priv, vps->bo, size,
                          &vmw_vram_ne_placement,
                          false, &vmw_bo_bo_free);
        vmw_overlay_resume_all(dev_priv);
        if (ret) {
                vps->bo = NULL; /* vmw_bo_init frees on error */
                return ret;
        }

        vps->bo_size = size;

        /*
         * TTM already thinks the buffer is pinned, but make sure the
         * pin_count is upped.
         */
        return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
}
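
/**
 * vmw_sou_primary_plane_atomic_update - Push plane contents to the device.
 *
 * @plane: The primary plane being updated.
 * @old_state: The previous plane state.
 *
 * Issues a full-screen dirty of the framebuffer bound to the plane's crtc
 * and, if a vblank event was requested and a fence was created, queues the
 * event to be signaled on that fence.
 */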
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_plane_state *old_state)
{
        struct drm_crtc *crtc = plane->state->crtc;
        struct drm_pending_vblank_event *event = NULL;
        struct vmw_fence_obj *fence = NULL;
        int ret;

        if (crtc && plane->state->fb) {
                struct vmw_private *dev_priv = vmw_priv(crtc->dev);
                struct vmw_framebuffer *vfb =
                        vmw_framebuffer_to_vfb(plane->state->fb);
                struct drm_vmw_rect vclips;

                vclips.x = crtc->x;
                vclips.y = crtc->y;
                vclips.w = crtc->mode.hdisplay;
                vclips.h = crtc->mode.vdisplay;

                if (vfb->bo)
                        ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
                                                      &vclips, 1, 1, true,
                                                      &fence, crtc);
                else
                        ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
                                                           &vclips, NULL, 0, 0,
                                                           1, 1, &fence, crtc);

                /*
                 * We cannot really fail this function, so if we do, then
                 * output an error and maintain consistent atomic state.
                 */
                if (ret != 0)
                        DRM_ERROR("Failed to update screen.\n");
        } else {
                /*
                 * When disabling a plane, CRTC and FB should always be NULL
                 * together, otherwise it's an error.
                 * Here the primary plane is being disabled, so we should
                 * really blank the screen object display unit, if not
                 * already done.
                 */
                return;
        }

        event = crtc->state->event;
        /*
         * In case of failure and other cases, vblank event will be sent in
         * vmw_du_crtc_atomic_flush.
         */
        if (event && fence) {
                struct drm_file *file_priv = event->base.file_priv;

                ret = vmw_event_fence_action_queue(file_priv,
                                                   fence,
                                                   &event->base,
                                                   &event->event.vbl.tv_sec,
                                                   &event->event.vbl.tv_usec,
                                                   true);
                if (unlikely(ret != 0))
                        DRM_ERROR("Failed to queue event on fence.\n");
                else
                        crtc->state->event = NULL;
        }

        if (fence)
                vmw_fence_obj_unreference(&fence);
}

static const struct drm_plane_funcs vmw_sou_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = vmw_du_primary_plane_destroy,
        .reset = vmw_du_plane_reset,
        .atomic_duplicate_state = vmw_du_plane_duplicate_state,
        .atomic_destroy_state = vmw_du_plane_destroy_state,
};

static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = vmw_du_cursor_plane_destroy,
        .reset = vmw_du_plane_reset,
        .atomic_duplicate_state = vmw_du_plane_duplicate_state,
        .atomic_destroy_state = vmw_du_plane_destroy_state,
};

/*
 * Atomic Helpers
 */
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
        .atomic_check = vmw_du_cursor_plane_atomic_check,
        .atomic_update = vmw_du_cursor_plane_atomic_update,
        .prepare_fb = vmw_du_cursor_plane_prepare_fb,
        .cleanup_fb = vmw_du_plane_cleanup_fb,
};

static const struct
drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
        .atomic_check = vmw_du_primary_plane_atomic_check,
        .atomic_update = vmw_sou_primary_plane_atomic_update,
        .prepare_fb = vmw_sou_primary_plane_prepare_fb,
        .cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
};

static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
        .prepare = vmw_sou_crtc_helper_prepare,
        .mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
        .atomic_check = vmw_du_crtc_atomic_check,
        .atomic_begin = vmw_du_crtc_atomic_begin,
        .atomic_flush = vmw_du_crtc_atomic_flush,
        .atomic_enable = vmw_sou_crtc_atomic_enable,
        .atomic_disable = vmw_sou_crtc_atomic_disable,
};
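
/**
 * vmw_sou_init - Initialize a single screen object display unit.
 *
 * @dev_priv: Pointer to the device private structure.
 * @unit: The unit (crtc) index to initialize.
 *
 * Allocates a struct vmw_screen_object_unit and registers its primary and
 * cursor planes, connector, encoder and crtc with the DRM core, attaching
 * the vmwgfx placement properties to the connector.
 *
 * Returns 0 on success, negative error code on failure.
 */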
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
        struct vmw_screen_object_unit *sou;
        struct drm_device *dev = dev_priv->dev;
        struct drm_connector *connector;
        struct drm_encoder *encoder;
        struct drm_plane *primary, *cursor;
        struct drm_crtc *crtc;
        int ret;

        sou = kzalloc(sizeof(*sou), GFP_KERNEL);
        if (!sou)
                return -ENOMEM;

        sou->base.unit = unit;
        crtc = &sou->base.crtc;
        encoder = &sou->base.encoder;
        connector = &sou->base.connector;
        primary = &sou->base.primary;
        cursor = &sou->base.cursor;

        sou->base.active_implicit = false;
        sou->base.pref_active = (unit == 0);
        sou->base.pref_width = dev_priv->initial_width;
        sou->base.pref_height = dev_priv->initial_height;
        sou->base.pref_mode = NULL;

        /*
         * Remove this after enabling atomic because property values can
         * only exist in a state object.
         */
        sou->base.is_implicit = false;

        /* Initialize primary plane */
        vmw_du_plane_reset(primary);

        ret = drm_universal_plane_init(dev, &sou->base.primary,
                                       0, &vmw_sou_plane_funcs,
                                       vmw_primary_plane_formats,
                                       ARRAY_SIZE(vmw_primary_plane_formats),
                                       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
        if (ret) {
                DRM_ERROR("Failed to initialize primary plane");
                goto err_free;
        }

        drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);

        /* Initialize cursor plane */
        vmw_du_plane_reset(cursor);

        ret = drm_universal_plane_init(dev, &sou->base.cursor,
                                       0, &vmw_sou_cursor_funcs,
                                       vmw_cursor_plane_formats,
                                       ARRAY_SIZE(vmw_cursor_plane_formats),
                                       NULL, DRM_PLANE_TYPE_CURSOR, NULL);
        if (ret) {
                DRM_ERROR("Failed to initialize cursor plane");
                drm_plane_cleanup(&sou->base.primary);
                goto err_free;
        }

        drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);

        vmw_du_connector_reset(connector);
        ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
                                 DRM_MODE_CONNECTOR_VIRTUAL);
        if (ret) {
                DRM_ERROR("Failed to initialize connector\n");
                goto err_free;
        }

        drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
        connector->status = vmw_du_connector_detect(connector, true);
        vmw_connector_state_to_vcs(connector->state)->is_implicit = false;

        ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
                               DRM_MODE_ENCODER_VIRTUAL, NULL);
        if (ret) {
                DRM_ERROR("Failed to initialize encoder\n");
                goto err_free_connector;
        }

        (void) drm_connector_attach_encoder(connector, encoder);
        encoder->possible_crtcs = (1 << unit);
        encoder->possible_clones = 0;

        ret = drm_connector_register(connector);
        if (ret) {
                DRM_ERROR("Failed to register connector\n");
                goto err_free_encoder;
        }

        vmw_du_crtc_reset(crtc);
        ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
                                        &sou->base.cursor,
                                        &vmw_screen_object_crtc_funcs, NULL);
        if (ret) {
                DRM_ERROR("Failed to initialize CRTC\n");
                goto err_free_unregister;
        }

        drm_crtc_helper_add(crtc, &vmw_sou_crtc_helper_funcs);

        drm_mode_crtc_set_gamma_size(crtc, 256);

        drm_object_attach_property(&connector->base,
                                   dev_priv->hotplug_mode_update_property, 1);
        drm_object_attach_property(&connector->base,
                                   dev->mode_config.suggested_x_property, 0);
        drm_object_attach_property(&connector->base,
                                   dev->mode_config.suggested_y_property, 0);
        if (dev_priv->implicit_placement_property)
                drm_object_attach_property
                        (&connector->base,
                         dev_priv->implicit_placement_property,
                         sou->base.is_implicit);

        return 0;

err_free_unregister:
        drm_connector_unregister(connector);
err_free_encoder:
        drm_encoder_cleanup(encoder);
err_free_connector:
        drm_connector_cleanup(connector);
err_free:
        kfree(sou);
        return ret;
}
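
/**
 * vmw_kms_sou_init_display - Set up the screen object display units.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Checks that the device supports Screen Object version 2, initializes
 * vblank support and one display unit per possible output, and marks
 * screen objects as the active display unit type.
 *
 * Returns 0 on success, negative error code on failure.
 */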
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int i, ret;

        if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
                DRM_INFO("Not using screen objects,"
                         " missing cap SCREEN_OBJECT_2\n");
                return -ENOSYS;
        }

        ret = -ENOMEM;
        dev_priv->num_implicit = 0;
        dev_priv->implicit_fb = NULL;

        ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
        if (unlikely(ret != 0))
                return ret;

        vmw_kms_create_implicit_placement_property(dev_priv, false);

        for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
                vmw_sou_init(dev_priv, i);

        dev_priv->active_display_unit = vmw_du_screen_object;

        DRM_INFO("Screen Objects Display Unit initialized\n");

        return 0;
}
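
/**
 * do_bo_define_gmrfb - Define the current GMRFB for screen object blits.
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: The buffer-object backed framebuffer to use as blit source
 * or readback destination.
 *
 * Emits an SVGA_CMD_DEFINE_GMRFB command describing the framebuffer's
 * format, pitch and backing buffer, so that subsequent GMRFB blit commands
 * reference it.
 *
 * Returns 0 on success, negative error code on failure.
 */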
static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
                              struct vmw_framebuffer *framebuffer)
{
        struct vmw_buffer_object *buf =
                container_of(framebuffer, struct vmw_framebuffer_bo,
                             base)->buffer;
        int depth = framebuffer->base.format->depth;
        struct {
                uint32_t header;
                SVGAFifoCmdDefineGMRFB body;
        } *cmd;

        /*
         * Emulate RGBA support: contrary to svga_reg.h, this is not
         * supported by hosts. This is only a problem if we are reading
         * this value later and expecting what we uploaded back.
         */
        if (depth == 32)
                depth = 24;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (!cmd) {
                DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
                return -ENOMEM;
        }

        cmd->header = SVGA_CMD_DEFINE_GMRFB;
        cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
        cmd->body.format.colorDepth = depth;
        cmd->body.format.reserved = 0;
        cmd->body.bytesPerLine = framebuffer->base.pitches[0];
        /* Buffer is reserved in vram or GMR */
        vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
 * blit surface to screen command.
 *
 * @dirty: The closure structure.
 *
 * Fills in the missing fields in the command, and translates the cliprects
 * to be relative to the encoded destination bounding box.
 */
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_surface_dirty *sdirty =
                container_of(dirty, typeof(*sdirty), base);
        struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
        s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
        s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
        size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
        SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
        int i;

        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
        cmd->header.size = sizeof(cmd->body) + region_size;

        /*
         * Use the destination bounding box to specify the destination and
         * source bounding regions.
         */
        cmd->body.destRect.left = sdirty->left;
        cmd->body.destRect.right = sdirty->right;
        cmd->body.destRect.top = sdirty->top;
        cmd->body.destRect.bottom = sdirty->bottom;

        cmd->body.srcRect.left = sdirty->left + trans_x;
        cmd->body.srcRect.right = sdirty->right + trans_x;
        cmd->body.srcRect.top = sdirty->top + trans_y;
        cmd->body.srcRect.bottom = sdirty->bottom + trans_y;

        cmd->body.srcImage.sid = sdirty->sid;
        cmd->body.destScreenId = dirty->unit->unit;

        /* Blits are relative to the destination rect. Translate. */
        for (i = 0; i < dirty->num_hits; ++i, ++blit) {
                blit->left -= sdirty->left;
                blit->right -= sdirty->left;
                blit->top -= sdirty->top;
                blit->bottom -= sdirty->top;
        }

        vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));

        sdirty->left = sdirty->top = S32_MAX;
        sdirty->right = sdirty->bottom = S32_MIN;
}

/**
 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
 * BLIT_SURFACE_TO_SCREEN command.
 */
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_surface_dirty *sdirty =
                container_of(dirty, typeof(*sdirty), base);
        struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
        SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];

        /* Destination rect. */
        blit += dirty->num_hits;
        blit->left = dirty->unit_x1;
        blit->top = dirty->unit_y1;
        blit->right = dirty->unit_x2;
        blit->bottom = dirty->unit_y2;

        /* Destination bounding box */
        sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
        sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
        sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
        sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);

        dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @srf: Pointer to surface to blit from. If NULL, the surface attached
 * to @framebuffer will be used.
 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
 * @num_clips: Number of clip rects in @clips.
 * @inc: Increment to use when looping over @clips.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 * @crtc: If crtc is passed, perform surface dirty on that crtc only.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
                                 struct vmw_framebuffer *framebuffer,
                                 struct drm_clip_rect *clips,
                                 struct drm_vmw_rect *vclips,
                                 struct vmw_resource *srf,
                                 s32 dest_x,
                                 s32 dest_y,
                                 unsigned num_clips, int inc,
                                 struct vmw_fence_obj **out_fence,
                                 struct drm_crtc *crtc)
{
        struct vmw_framebuffer_surface *vfbs =
                container_of(framebuffer, typeof(*vfbs), base);
        struct vmw_kms_sou_surface_dirty sdirty;
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        if (!srf)
                srf = &vfbs->surface->res;

        ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
        if (ret)
                goto out_unref;

        sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
        sdirty.base.clip = vmw_sou_surface_clip;
        sdirty.base.dev_priv = dev_priv;
        sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
                sizeof(SVGASignedRect) * num_clips;
        sdirty.base.crtc = crtc;

        sdirty.sid = srf->id;
        sdirty.left = sdirty.top = S32_MAX;
        sdirty.right = sdirty.bottom = S32_MIN;
        sdirty.dst_x = dest_x;
        sdirty.dst_y = dest_y;

        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   dest_x, dest_y, num_clips, inc,
                                   &sdirty.base);

        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
                                         NULL);

        return ret;

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}

/**
 * vmw_sou_bo_fifo_commit - Callback to submit a set of GMRFB-to-screen blits.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of GMRFB-to-screen blit clips.
 */
static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{
        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        vmw_fifo_commit(dirty->dev_priv,
                        sizeof(struct vmw_kms_sou_bo_blit) *
                        dirty->num_hits);
}

/**
 * vmw_sou_bo_clip - Callback to encode a GMRFB-to-screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_bo_blit *blit = dirty->cmd;

        blit += dirty->num_hits;
        blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
        blit->body.destScreenId = dirty->unit->unit;
        blit->body.srcOrigin.x = dirty->fb_x;
        blit->body.srcOrigin.y = dirty->fb_y;
        blit->body.destRect.left = dirty->unit_x1;
        blit->body.destRect.top = dirty->unit_y1;
        blit->body.destRect.right = dirty->unit_x2;
        blit->body.destRect.bottom = dirty->unit_y2;

        dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the buffer-object backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @num_clips: Number of clip rects in @clips.
 * @increment: Increment to use when looping over @clips.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 * @crtc: If crtc is passed, perform bo dirty on that crtc only.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
                            struct vmw_framebuffer *framebuffer,
                            struct drm_clip_rect *clips,
                            struct drm_vmw_rect *vclips,
                            unsigned num_clips, int increment,
                            bool interruptible,
                            struct vmw_fence_obj **out_fence,
                            struct drm_crtc *crtc)
{
        struct vmw_buffer_object *buf =
                container_of(framebuffer, struct vmw_framebuffer_bo,
                             base)->buffer;
        struct vmw_kms_dirty dirty;
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
        if (ret)
                goto out_unref;

        ret = do_bo_define_gmrfb(dev_priv, framebuffer);
        if (unlikely(ret != 0))
                goto out_revert;

        dirty.crtc = crtc;
        dirty.fifo_commit = vmw_sou_bo_fifo_commit;
        dirty.clip = vmw_sou_bo_clip;
        dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   0, 0, num_clips, increment, &dirty);

        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
                                         NULL);

        return ret;

out_revert:
        vmw_validation_revert(&val_ctx);
out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}

/**
 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
                return;
        }

        vmw_fifo_commit(dirty->dev_priv,
                        sizeof(struct vmw_kms_sou_readback_blit) *
                        dirty->num_hits);
}

/**
 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
 */
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
        struct vmw_kms_sou_readback_blit *blit = dirty->cmd;

        blit += dirty->num_hits;
        blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
        blit->body.srcScreenId = dirty->unit->unit;
        blit->body.destOrigin.x = dirty->fb_x;
        blit->body.destOrigin.y = dirty->fb_y;
        blit->body.srcRect.left = dirty->unit_x1;
        blit->body.srcRect.top = dirty->unit_y1;
        blit->body.srcRect.right = dirty->unit_x2;
        blit->body.srcRect.bottom = dirty->unit_y2;

        dirty->num_hits++;
}

/**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 * @crtc: If crtc is passed, readback on that crtc only.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                         struct drm_file *file_priv,
                         struct vmw_framebuffer *vfb,
                         struct drm_vmw_fence_rep __user *user_fence_rep,
                         struct drm_vmw_rect *vclips,
                         uint32_t num_clips,
                         struct drm_crtc *crtc)
{
        struct vmw_buffer_object *buf =
                container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
        struct vmw_kms_dirty dirty;
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, NULL, true);
        if (ret)
                goto out_unref;

        ret = do_bo_define_gmrfb(dev_priv, vfb);
        if (unlikely(ret != 0))
                goto out_revert;

        dirty.crtc = crtc;
        dirty.fifo_commit = vmw_sou_readback_fifo_commit;
        dirty.clip = vmw_sou_readback_clip;
        dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
                                   0, 0, num_clips, 1, &dirty);

        vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
                                         user_fence_rep);

        return ret;

out_revert:
        vmw_validation_revert(&val_ctx);
out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}