/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#ifndef __KERNEL__
#include <drm.h>
#endif
  32. #define DRM_VMW_MAX_SURFACE_FACES 6
  33. #define DRM_VMW_MAX_MIP_LEVELS 24
  34. #define DRM_VMW_GET_PARAM 0
  35. #define DRM_VMW_ALLOC_DMABUF 1
  36. #define DRM_VMW_UNREF_DMABUF 2
  37. #define DRM_VMW_CURSOR_BYPASS 3
  38. /* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
  39. #define DRM_VMW_CONTROL_STREAM 4
  40. #define DRM_VMW_CLAIM_STREAM 5
  41. #define DRM_VMW_UNREF_STREAM 6
  42. /* guarded by DRM_VMW_PARAM_3D == 1 */
  43. #define DRM_VMW_CREATE_CONTEXT 7
  44. #define DRM_VMW_UNREF_CONTEXT 8
  45. #define DRM_VMW_CREATE_SURFACE 9
  46. #define DRM_VMW_UNREF_SURFACE 10
  47. #define DRM_VMW_REF_SURFACE 11
  48. #define DRM_VMW_EXECBUF 12
  49. #define DRM_VMW_GET_3D_CAP 13
  50. #define DRM_VMW_FENCE_WAIT 14
  51. #define DRM_VMW_FENCE_SIGNALED 15
  52. #define DRM_VMW_FENCE_UNREF 16
  53. #define DRM_VMW_FENCE_EVENT 17
  54. #define DRM_VMW_PRESENT 18
  55. #define DRM_VMW_PRESENT_READBACK 19
  56. #define DRM_VMW_UPDATE_LAYOUT 20
  57. #define DRM_VMW_CREATE_SHADER 21
  58. #define DRM_VMW_UNREF_SHADER 22
  59. #define DRM_VMW_GB_SURFACE_CREATE 23
  60. #define DRM_VMW_GB_SURFACE_REF 24
  61. #define DRM_VMW_SYNCCPU 25
  62. /*************************************************************************/
  63. /**
  64. * DRM_VMW_GET_PARAM - get device information.
  65. *
  66. * DRM_VMW_PARAM_FIFO_OFFSET:
  67. * Offset to use to map the first page of the FIFO read-only.
  68. * The fifo is mapped using the mmap() system call on the drm device.
  69. *
  70. * DRM_VMW_PARAM_OVERLAY_IOCTL:
  71. * Does the driver support the overlay ioctl.
  72. */
  73. #define DRM_VMW_PARAM_NUM_STREAMS 0
  74. #define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
  75. #define DRM_VMW_PARAM_3D 2
  76. #define DRM_VMW_PARAM_HW_CAPS 3
  77. #define DRM_VMW_PARAM_FIFO_CAPS 4
  78. #define DRM_VMW_PARAM_MAX_FB_SIZE 5
  79. #define DRM_VMW_PARAM_FIFO_HW_VERSION 6
  80. #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
  81. #define DRM_VMW_PARAM_3D_CAPS_SIZE 8
  82. #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
  83. #define DRM_VMW_PARAM_MAX_MOB_SIZE 10
  84. /**
  85. * struct drm_vmw_getparam_arg
  86. *
  87. * @value: Returned value. //Out
  88. * @param: Parameter to query. //In.
  89. *
  90. * Argument to the DRM_VMW_GET_PARAM Ioctl.
  91. */
  92. struct drm_vmw_getparam_arg {
  93. uint64_t value;
  94. uint32_t param;
  95. uint32_t pad64;
  96. };
  97. /*************************************************************************/
  98. /**
  99. * DRM_VMW_CREATE_CONTEXT - Create a host context.
  100. *
  101. * Allocates a device unique context id, and queues a create context command
  102. * for the host. Does not wait for host completion.
  103. */
  104. /**
  105. * struct drm_vmw_context_arg
  106. *
  107. * @cid: Device unique context ID.
  108. *
  109. * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
  110. * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
  111. */
  112. struct drm_vmw_context_arg {
  113. int32_t cid;
  114. uint32_t pad64;
  115. };
  116. /*************************************************************************/
  117. /**
  118. * DRM_VMW_UNREF_CONTEXT - Create a host context.
  119. *
  120. * Frees a global context id, and queues a destroy host command for the host.
  121. * Does not wait for host completion. The context ID can be used directly
  122. * in the command stream and shows up as the same context ID on the host.
  123. */
  124. /*************************************************************************/
  125. /**
  126. * DRM_VMW_CREATE_SURFACE - Create a host suface.
  127. *
  128. * Allocates a device unique surface id, and queues a create surface command
  129. * for the host. Does not wait for host completion. The surface ID can be
  130. * used directly in the command stream and shows up as the same surface
  131. * ID on the host.
  132. */
  133. /**
  134. * struct drm_wmv_surface_create_req
  135. *
  136. * @flags: Surface flags as understood by the host.
  137. * @format: Surface format as understood by the host.
  138. * @mip_levels: Number of mip levels for each face.
  139. * An unused face should have 0 encoded.
  140. * @size_addr: Address of a user-space array of sruct drm_vmw_size
  141. * cast to an uint64_t for 32-64 bit compatibility.
  142. * The size of the array should equal the total number of mipmap levels.
  143. * @shareable: Boolean whether other clients (as identified by file descriptors)
  144. * may reference this surface.
  145. * @scanout: Boolean whether the surface is intended to be used as a
  146. * scanout.
  147. *
  148. * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
  149. * Output data from the DRM_VMW_REF_SURFACE Ioctl.
  150. */
  151. struct drm_vmw_surface_create_req {
  152. uint32_t flags;
  153. uint32_t format;
  154. uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
  155. uint64_t size_addr;
  156. int32_t shareable;
  157. int32_t scanout;
  158. };
  159. /**
  160. * struct drm_wmv_surface_arg
  161. *
  162. * @sid: Surface id of created surface or surface to destroy or reference.
  163. *
  164. * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
  165. * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
  166. * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
  167. */
  168. struct drm_vmw_surface_arg {
  169. int32_t sid;
  170. uint32_t pad64;
  171. };
  172. /**
  173. * struct drm_vmw_size ioctl.
  174. *
  175. * @width - mip level width
  176. * @height - mip level height
  177. * @depth - mip level depth
  178. *
  179. * Description of a mip level.
  180. * Input data to the DRM_WMW_CREATE_SURFACE Ioctl.
  181. */
  182. struct drm_vmw_size {
  183. uint32_t width;
  184. uint32_t height;
  185. uint32_t depth;
  186. uint32_t pad64;
  187. };
  188. /**
  189. * union drm_vmw_surface_create_arg
  190. *
  191. * @rep: Output data as described above.
  192. * @req: Input data as described above.
  193. *
  194. * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
  195. */
  196. union drm_vmw_surface_create_arg {
  197. struct drm_vmw_surface_arg rep;
  198. struct drm_vmw_surface_create_req req;
  199. };
  200. /*************************************************************************/
  201. /**
  202. * DRM_VMW_REF_SURFACE - Reference a host surface.
  203. *
  204. * Puts a reference on a host surface with a give sid, as previously
  205. * returned by the DRM_VMW_CREATE_SURFACE ioctl.
  206. * A reference will make sure the surface isn't destroyed while we hold
  207. * it and will allow the calling client to use the surface ID in the command
  208. * stream.
  209. *
  210. * On successful return, the Ioctl returns the surface information given
  211. * in the DRM_VMW_CREATE_SURFACE ioctl.
  212. */
  213. /**
  214. * union drm_vmw_surface_reference_arg
  215. *
  216. * @rep: Output data as described above.
  217. * @req: Input data as described above.
  218. *
  219. * Argument to the DRM_VMW_REF_SURFACE Ioctl.
  220. */
  221. union drm_vmw_surface_reference_arg {
  222. struct drm_vmw_surface_create_req rep;
  223. struct drm_vmw_surface_arg req;
  224. };
  225. /*************************************************************************/
  226. /**
  227. * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
  228. *
  229. * Clear a reference previously put on a host surface.
  230. * When all references are gone, including the one implicitly placed
  231. * on creation,
  232. * a destroy surface command will be queued for the host.
  233. * Does not wait for completion.
  234. */
  235. /*************************************************************************/
  236. /**
  237. * DRM_VMW_EXECBUF
  238. *
  239. * Submit a command buffer for execution on the host, and return a
  240. * fence seqno that when signaled, indicates that the command buffer has
  241. * executed.
  242. */
  243. /**
  244. * struct drm_vmw_execbuf_arg
  245. *
  246. * @commands: User-space address of a command buffer cast to an uint64_t.
  247. * @command-size: Size in bytes of the command buffer.
  248. * @throttle-us: Sleep until software is less than @throttle_us
  249. * microseconds ahead of hardware. The driver may round this value
  250. * to the nearest kernel tick.
  251. * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
  252. * uint64_t.
  253. * @version: Allows expanding the execbuf ioctl parameters without breaking
  254. * backwards compatibility, since user-space will always tell the kernel
  255. * which version it uses.
  256. * @flags: Execbuf flags. None currently.
  257. *
  258. * Argument to the DRM_VMW_EXECBUF Ioctl.
  259. */
  260. #define DRM_VMW_EXECBUF_VERSION 1
  261. struct drm_vmw_execbuf_arg {
  262. uint64_t commands;
  263. uint32_t command_size;
  264. uint32_t throttle_us;
  265. uint64_t fence_rep;
  266. uint32_t version;
  267. uint32_t flags;
  268. };
  269. /**
  270. * struct drm_vmw_fence_rep
  271. *
  272. * @handle: Fence object handle for fence associated with a command submission.
  273. * @mask: Fence flags relevant for this fence object.
  274. * @seqno: Fence sequence number in fifo. A fence object with a lower
  275. * seqno will signal the EXEC flag before a fence object with a higher
  276. * seqno. This can be used by user-space to avoid kernel calls to determine
  277. * whether a fence has signaled the EXEC flag. Note that @seqno will
  278. * wrap at 32-bit.
  279. * @passed_seqno: The highest seqno number processed by the hardware
  280. * so far. This can be used to mark user-space fence objects as signaled, and
  281. * to determine whether a fence seqno might be stale.
  282. * @error: This member should've been set to -EFAULT on submission.
  283. * The following actions should be take on completion:
  284. * error == -EFAULT: Fence communication failed. The host is synchronized.
  285. * Use the last fence id read from the FIFO fence register.
  286. * error != 0 && error != -EFAULT:
  287. * Fence submission failed. The host is synchronized. Use the fence_seq member.
  288. * error == 0: All is OK, The host may not be synchronized.
  289. * Use the fence_seq member.
  290. *
  291. * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
  292. */
  293. struct drm_vmw_fence_rep {
  294. uint32_t handle;
  295. uint32_t mask;
  296. uint32_t seqno;
  297. uint32_t passed_seqno;
  298. uint32_t pad64;
  299. int32_t error;
  300. };
  301. /*************************************************************************/
  302. /**
  303. * DRM_VMW_ALLOC_DMABUF
  304. *
  305. * Allocate a DMA buffer that is visible also to the host.
  306. * NOTE: The buffer is
  307. * identified by a handle and an offset, which are private to the guest, but
  308. * useable in the command stream. The guest kernel may translate these
  309. * and patch up the command stream accordingly. In the future, the offset may
  310. * be zero at all times, or it may disappear from the interface before it is
  311. * fixed.
  312. *
  313. * The DMA buffer may stay user-space mapped in the guest at all times,
  314. * and is thus suitable for sub-allocation.
  315. *
  316. * DMA buffers are mapped using the mmap() syscall on the drm device.
  317. */
  318. /**
  319. * struct drm_vmw_alloc_dmabuf_req
  320. *
  321. * @size: Required minimum size of the buffer.
  322. *
  323. * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
  324. */
  325. struct drm_vmw_alloc_dmabuf_req {
  326. uint32_t size;
  327. uint32_t pad64;
  328. };
  329. /**
  330. * struct drm_vmw_dmabuf_rep
  331. *
  332. * @map_handle: Offset to use in the mmap() call used to map the buffer.
  333. * @handle: Handle unique to this buffer. Used for unreferencing.
  334. * @cur_gmr_id: GMR id to use in the command stream when this buffer is
  335. * referenced. See not above.
  336. * @cur_gmr_offset: Offset to use in the command stream when this buffer is
  337. * referenced. See note above.
  338. *
  339. * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
  340. */
  341. struct drm_vmw_dmabuf_rep {
  342. uint64_t map_handle;
  343. uint32_t handle;
  344. uint32_t cur_gmr_id;
  345. uint32_t cur_gmr_offset;
  346. uint32_t pad64;
  347. };
  348. /**
  349. * union drm_vmw_dmabuf_arg
  350. *
  351. * @req: Input data as described above.
  352. * @rep: Output data as described above.
  353. *
  354. * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
  355. */
  356. union drm_vmw_alloc_dmabuf_arg {
  357. struct drm_vmw_alloc_dmabuf_req req;
  358. struct drm_vmw_dmabuf_rep rep;
  359. };
  360. /*************************************************************************/
  361. /**
  362. * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
  363. *
  364. */
  365. /**
  366. * struct drm_vmw_unref_dmabuf_arg
  367. *
  368. * @handle: Handle indicating what buffer to free. Obtained from the
  369. * DRM_VMW_ALLOC_DMABUF Ioctl.
  370. *
  371. * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
  372. */
  373. struct drm_vmw_unref_dmabuf_arg {
  374. uint32_t handle;
  375. uint32_t pad64;
  376. };
  377. /*************************************************************************/
  378. /**
  379. * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
  380. *
  381. * This IOCTL controls the overlay units of the svga device.
  382. * The SVGA overlay units does not work like regular hardware units in
  383. * that they do not automaticaly read back the contents of the given dma
  384. * buffer. But instead only read back for each call to this ioctl, and
  385. * at any point between this call being made and a following call that
  386. * either changes the buffer or disables the stream.
  387. */
  388. /**
  389. * struct drm_vmw_rect
  390. *
  391. * Defines a rectangle. Used in the overlay ioctl to define
  392. * source and destination rectangle.
  393. */
  394. struct drm_vmw_rect {
  395. int32_t x;
  396. int32_t y;
  397. uint32_t w;
  398. uint32_t h;
  399. };
  400. /**
  401. * struct drm_vmw_control_stream_arg
  402. *
  403. * @stream_id: Stearm to control
  404. * @enabled: If false all following arguments are ignored.
  405. * @handle: Handle to buffer for getting data from.
  406. * @format: Format of the overlay as understood by the host.
  407. * @width: Width of the overlay.
  408. * @height: Height of the overlay.
  409. * @size: Size of the overlay in bytes.
  410. * @pitch: Array of pitches, the two last are only used for YUV12 formats.
  411. * @offset: Offset from start of dma buffer to overlay.
  412. * @src: Source rect, must be within the defined area above.
  413. * @dst: Destination rect, x and y may be negative.
  414. *
  415. * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
  416. */
  417. struct drm_vmw_control_stream_arg {
  418. uint32_t stream_id;
  419. uint32_t enabled;
  420. uint32_t flags;
  421. uint32_t color_key;
  422. uint32_t handle;
  423. uint32_t offset;
  424. int32_t format;
  425. uint32_t size;
  426. uint32_t width;
  427. uint32_t height;
  428. uint32_t pitch[3];
  429. uint32_t pad64;
  430. struct drm_vmw_rect src;
  431. struct drm_vmw_rect dst;
  432. };
  433. /*************************************************************************/
  434. /**
  435. * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
  436. *
  437. */
  438. #define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
  439. #define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
  440. /**
  441. * struct drm_vmw_cursor_bypass_arg
  442. *
  443. * @flags: Flags.
  444. * @crtc_id: Crtc id, only used if DMR_CURSOR_BYPASS_ALL isn't passed.
  445. * @xpos: X position of cursor.
  446. * @ypos: Y position of cursor.
  447. * @xhot: X hotspot.
  448. * @yhot: Y hotspot.
  449. *
  450. * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
  451. */
  452. struct drm_vmw_cursor_bypass_arg {
  453. uint32_t flags;
  454. uint32_t crtc_id;
  455. int32_t xpos;
  456. int32_t ypos;
  457. int32_t xhot;
  458. int32_t yhot;
  459. };
  460. /*************************************************************************/
  461. /**
  462. * DRM_VMW_CLAIM_STREAM - Claim a single stream.
  463. */
  464. /**
  465. * struct drm_vmw_context_arg
  466. *
  467. * @stream_id: Device unique context ID.
  468. *
  469. * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
  470. * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
  471. */
  472. struct drm_vmw_stream_arg {
  473. uint32_t stream_id;
  474. uint32_t pad64;
  475. };
  476. /*************************************************************************/
  477. /**
  478. * DRM_VMW_UNREF_STREAM - Unclaim a stream.
  479. *
  480. * Return a single stream that was claimed by this process. Also makes
  481. * sure that the stream has been stopped.
  482. */
  483. /*************************************************************************/
  484. /**
  485. * DRM_VMW_GET_3D_CAP
  486. *
  487. * Read 3D capabilities from the FIFO
  488. *
  489. */
  490. /**
  491. * struct drm_vmw_get_3d_cap_arg
  492. *
  493. * @buffer: Pointer to a buffer for capability data, cast to an uint64_t
  494. * @size: Max size to copy
  495. *
  496. * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL
  497. * ioctls.
  498. */
  499. struct drm_vmw_get_3d_cap_arg {
  500. uint64_t buffer;
  501. uint32_t max_size;
  502. uint32_t pad64;
  503. };
  504. /*************************************************************************/
  505. /**
  506. * DRM_VMW_FENCE_WAIT
  507. *
  508. * Waits for a fence object to signal. The wait is interruptible, so that
  509. * signals may be delivered during the interrupt. The wait may timeout,
  510. * in which case the calls returns -EBUSY. If the wait is restarted,
  511. * that is restarting without resetting @cookie_valid to zero,
  512. * the timeout is computed from the first call.
  513. *
  514. * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
  515. * on:
  516. * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
  517. * stream
  518. * have executed.
  519. * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
  520. * commands
  521. * in the buffer given to the EXECBUF ioctl returning the fence object handle
  522. * are available to user-space.
  523. *
  524. * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
  525. * fenc wait ioctl returns 0, the fence object has been unreferenced after
  526. * the wait.
  527. */
  528. #define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
  529. #define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)
  530. #define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
  531. /**
  532. * struct drm_vmw_fence_wait_arg
  533. *
  534. * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
  535. * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
  536. * @kernel_cookie: Set to 0 on first call. Left alone on restart.
  537. * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
  538. * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
  539. * before returning.
  540. * @flags: Fence flags to wait on.
  541. * @wait_options: Options that control the behaviour of the wait ioctl.
  542. *
  543. * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
  544. */
  545. struct drm_vmw_fence_wait_arg {
  546. uint32_t handle;
  547. int32_t cookie_valid;
  548. uint64_t kernel_cookie;
  549. uint64_t timeout_us;
  550. int32_t lazy;
  551. int32_t flags;
  552. int32_t wait_options;
  553. int32_t pad64;
  554. };
  555. /*************************************************************************/
  556. /**
  557. * DRM_VMW_FENCE_SIGNALED
  558. *
  559. * Checks if a fence object is signaled..
  560. */
  561. /**
  562. * struct drm_vmw_fence_signaled_arg
  563. *
  564. * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
  565. * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
  566. * @signaled: Out: Flags signaled.
  567. * @sequence: Out: Highest sequence passed so far. Can be used to signal the
  568. * EXEC flag of user-space fence objects.
  569. *
  570. * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
  571. * ioctls.
  572. */
  573. struct drm_vmw_fence_signaled_arg {
  574. uint32_t handle;
  575. uint32_t flags;
  576. int32_t signaled;
  577. uint32_t passed_seqno;
  578. uint32_t signaled_flags;
  579. uint32_t pad64;
  580. };
  581. /*************************************************************************/
  582. /**
  583. * DRM_VMW_FENCE_UNREF
  584. *
  585. * Unreferences a fence object, and causes it to be destroyed if there are no
  586. * other references to it.
  587. *
  588. */
  589. /**
  590. * struct drm_vmw_fence_arg
  591. *
  592. * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
  593. *
  594. * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl..
  595. */
  596. struct drm_vmw_fence_arg {
  597. uint32_t handle;
  598. uint32_t pad64;
  599. };
  600. /*************************************************************************/
  601. /**
  602. * DRM_VMW_FENCE_EVENT
  603. *
  604. * Queues an event on a fence to be delivered on the drm character device
  605. * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
  606. * Optionally the approximate time when the fence signaled is
  607. * given by the event.
  608. */
  609. /*
  610. * The event type
  611. */
  612. #define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
  613. struct drm_vmw_event_fence {
  614. struct drm_event base;
  615. uint64_t user_data;
  616. uint32_t tv_sec;
  617. uint32_t tv_usec;
  618. };
  619. /*
  620. * Flags that may be given to the command.
  621. */
  622. /* Request fence signaled time on the event. */
  623. #define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)
  624. /**
  625. * struct drm_vmw_fence_event_arg
  626. *
  627. * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
  628. * the fence is not supposed to be referenced by user-space.
  629. * @user_info: Info to be delivered with the event.
  630. * @handle: Attach the event to this fence only.
  631. * @flags: A set of flags as defined above.
  632. */
  633. struct drm_vmw_fence_event_arg {
  634. uint64_t fence_rep;
  635. uint64_t user_data;
  636. uint32_t handle;
  637. uint32_t flags;
  638. };
  639. /*************************************************************************/
  640. /**
  641. * DRM_VMW_PRESENT
  642. *
  643. * Executes an SVGA present on a given fb for a given surface. The surface
  644. * is placed on the framebuffer. Cliprects are given relative to the given
  645. * point (the point disignated by dest_{x|y}).
  646. *
  647. */
  648. /**
  649. * struct drm_vmw_present_arg
  650. * @fb_id: framebuffer id to present / read back from.
  651. * @sid: Surface id to present from.
  652. * @dest_x: X placement coordinate for surface.
  653. * @dest_y: Y placement coordinate for surface.
  654. * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
  655. * @num_clips: Number of cliprects given relative to the framebuffer origin,
  656. * in the same coordinate space as the frame buffer.
  657. * @pad64: Unused 64-bit padding.
  658. *
  659. * Input argument to the DRM_VMW_PRESENT ioctl.
  660. */
  661. struct drm_vmw_present_arg {
  662. uint32_t fb_id;
  663. uint32_t sid;
  664. int32_t dest_x;
  665. int32_t dest_y;
  666. uint64_t clips_ptr;
  667. uint32_t num_clips;
  668. uint32_t pad64;
  669. };
  670. /*************************************************************************/
  671. /**
  672. * DRM_VMW_PRESENT_READBACK
  673. *
  674. * Executes an SVGA present readback from a given fb to the dma buffer
  675. * currently bound as the fb. If there is no dma buffer bound to the fb,
  676. * an error will be returned.
  677. *
  678. */
  679. /**
  680. * struct drm_vmw_present_arg
  681. * @fb_id: fb_id to present / read back from.
  682. * @num_clips: Number of cliprects.
  683. * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
  684. * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t.
  685. * If this member is NULL, then the ioctl should not return a fence.
  686. */
  687. struct drm_vmw_present_readback_arg {
  688. uint32_t fb_id;
  689. uint32_t num_clips;
  690. uint64_t clips_ptr;
  691. uint64_t fence_rep;
  692. };
  693. /*************************************************************************/
  694. /**
  695. * DRM_VMW_UPDATE_LAYOUT - Update layout
  696. *
  697. * Updates the preferred modes and connection status for connectors. The
  698. * command consists of one drm_vmw_update_layout_arg pointing to an array
  699. * of num_outputs drm_vmw_rect's.
  700. */
  701. /**
  702. * struct drm_vmw_update_layout_arg
  703. *
  704. * @num_outputs: number of active connectors
  705. * @rects: pointer to array of drm_vmw_rect cast to an uint64_t
  706. *
  707. * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
  708. */
  709. struct drm_vmw_update_layout_arg {
  710. uint32_t num_outputs;
  711. uint32_t pad64;
  712. uint64_t rects;
  713. };
  714. /*************************************************************************/
  715. /**
  716. * DRM_VMW_CREATE_SHADER - Create shader
  717. *
  718. * Creates a shader and optionally binds it to a dma buffer containing
  719. * the shader byte-code.
  720. */
  721. /**
  722. * enum drm_vmw_shader_type - Shader types
  723. */
  724. enum drm_vmw_shader_type {
  725. drm_vmw_shader_type_vs = 0,
  726. drm_vmw_shader_type_ps,
  727. drm_vmw_shader_type_gs
  728. };
  729. /**
  730. * struct drm_vmw_shader_create_arg
  731. *
  732. * @shader_type: Shader type of the shader to create.
  733. * @size: Size of the byte-code in bytes.
  734. * where the shader byte-code starts
  735. * @buffer_handle: Buffer handle identifying the buffer containing the
  736. * shader byte-code
  737. * @shader_handle: On successful completion contains a handle that
  738. * can be used to subsequently identify the shader.
  739. * @offset: Offset in bytes into the buffer given by @buffer_handle,
  740. *
  741. * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
  742. */
  743. struct drm_vmw_shader_create_arg {
  744. enum drm_vmw_shader_type shader_type;
  745. uint32_t size;
  746. uint32_t buffer_handle;
  747. uint32_t shader_handle;
  748. uint64_t offset;
  749. };
  750. /*************************************************************************/
  751. /**
  752. * DRM_VMW_UNREF_SHADER - Unreferences a shader
  753. *
  754. * Destroys a user-space reference to a shader, optionally destroying
  755. * it.
  756. */
  757. /**
  758. * struct drm_vmw_shader_arg
  759. *
  760. * @handle: Handle identifying the shader to destroy.
  761. *
  762. * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
  763. */
  764. struct drm_vmw_shader_arg {
  765. uint32_t handle;
  766. uint32_t pad64;
  767. };
  768. /*************************************************************************/
  769. /**
  770. * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
  771. *
  772. * Allocates a surface handle and queues a create surface command
  773. * for the host on the first use of the surface. The surface ID can
  774. * be used as the surface ID in commands referencing the surface.
  775. */
/**
 * enum drm_vmw_surface_flags - Guest-backed surface creation flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable.
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};
/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @pad64: Padding to align @base_size on a 64-bit boundary; unused.
 * @base_size: Size of the base mip level for all faces.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	uint32_t svga3d_flags;
	uint32_t format;
	uint32_t mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	uint32_t multisample_count;
	uint32_t autogen_filter;
	uint32_t buffer_handle;
	uint32_t pad64;
	struct drm_vmw_size base_size;
};
/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle.
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	uint32_t handle;
	uint32_t backup_size;
	uint32_t buffer_handle;
	uint32_t buffer_size;
	uint64_t buffer_map_handle;
};
/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
  850. /*************************************************************************/
  851. /**
  852. * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
  853. *
  854. * Puts a reference on a host surface with a given handle, as previously
  855. * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
  856. * A reference will make sure the surface isn't destroyed while we hold
  857. * it and will allow the calling client to use the surface handle in
  858. * the command stream.
  859. *
  860. * On successful return, the Ioctl returns the surface information given
  861. * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
  862. */
/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};
/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};
  889. /*************************************************************************/
  890. /**
  891. * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
  892. *
  893. * Idles any previously submitted GPU operations on the buffer and
  894. * by default blocks command submissions that reference the buffer.
  895. * If the file descriptor used to grab a blocking CPU sync is closed, the
  896. * cpu sync is released.
  897. * The flags argument indicates how the grab / release operation should be
  898. * performed:
  899. */
/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that references the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Dont wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};
/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations.
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};
/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @flags: Flags as described above.
 * @handle: Handle identifying the buffer object.
 * @pad64: Padding to a 64-bit struct size; unused.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	uint32_t handle;
	uint32_t pad64;
};
  943. #endif