vmwgfx_drv.h

/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 10
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	bool gb_aware;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};

/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};

struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct list_head ctx_resource_list; /* For contexts and cotables */
	struct vmw_dma_buffer *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct vmw_resource_val_node *dx_ctx_node;
	struct vmw_dma_buffer *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

/*
 * struct vmw_otable - Guest Memory Object table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	unsigned num_implicit;
	struct vmw_framebuffer *implicit_fb;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;
	bool refuse_hibernation;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_dma_buffer *dummy_query_bo;
	struct vmw_dma_buffer *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing-critical paths anyway.
 * Instead we get the extra benefit of being sure that we never forget
 * to take the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
		unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
		unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}
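
/*
 * Illustrative usage sketch only (not part of this header): the accessors
 * above take SVGA_REG_* offsets. SVGA_REG_ID and SVGA_ID_2 are assumed to
 * come from the SVGA device headers pulled in elsewhere; the snippet
 * negotiates the device version and bails out if the device does not
 * speak the expected one.
 *
 *	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 *	if (vmw_read(dev_priv, SVGA_REG_ID) != SVGA_ID_2)
 *		return -ENOSYS;
 */
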
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);

/**
 * GMR utilities - vmwgfx_gmr.c
 */
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
		const struct vmw_sg_table *vsgt,
		unsigned long num_pages,
		int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
		bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
		struct ttm_object_file *tfile,
		uint32_t handle,
		struct vmw_surface **out_surf,
		struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
		struct vmw_private *dev_priv,
		struct ttm_object_file *tfile,
		uint32_t handle,
		const struct vmw_user_resource_conv *converter,
		struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
		struct vmw_dma_buffer *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible,
		void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
		struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
		struct ttm_object_file *tfile,
		uint32_t size,
		bool shareable,
		uint32_t *handle,
		struct vmw_dma_buffer **p_dma_buf,
		struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
		struct vmw_dma_buffer *dma_buf,
		uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
		uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
		uint32_t id, struct vmw_dma_buffer **out,
		struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
		struct ttm_object_file *tfile,
		uint32_t *inout_id,
		struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
		bool switch_backup,
		struct vmw_dma_buffer *new_backup,
		unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
		struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
		struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
		struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
		struct vmw_dma_buffer *bo,
		struct ttm_placement *placement,
		bool interruptible);
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
		struct vmw_dma_buffer *buf,
		bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
		struct vmw_dma_buffer *buf,
		bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
		struct vmw_dma_buffer *bo,
		bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
		struct vmw_dma_buffer *bo,
		bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
		SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */
extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
		struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
		size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */
extern int vmw_fifo_init(struct vmw_private *dev_priv,
		struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
		struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
		uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
		uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
		bool interruptible);
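
/*
 * Illustrative sketch of the usual FIFO command submission pattern
 * (assumptions: "SVGA3dCmdFoo" stands in for a real SVGA3D command body
 * and SVGA3D_CMD_FOO for its id; only the reserve/fill/commit flow is
 * the point here):
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdFoo body;
 *	} *cmd;
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	cmd->header.id = SVGA3D_CMD_FOO;
 *	cmd->header.size = sizeof(cmd->body);
 *	(fill in cmd->body here)
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */
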
/**
 * TTM glue - vmwgfx_ttm_glue.c
 */
extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */
extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
		const struct vmw_sg_table *vsgt,
		unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
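
/*
 * Illustrative usage sketch (assumption: "bo" is a populated buffer
 * object whose pages are to be handed to the GMR/MOB page-table code):
 * iterate over all pages of the buffer object's scatter/gather table
 * using the helpers above. The iterator does not point to a valid page
 * until the first call to vmw_piter_next().
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *		(hand addr to the page-table setup code)
 *	}
 */
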
/**
 * Command submission - vmwgfx_execbuf.c
 */
extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
		struct vmw_private *dev_priv,
		void __user *user_commands,
		void *kernel_commands,
		uint32_t command_size,
		uint64_t throttle_us,
		uint32_t dx_context_handle,
		struct drm_vmw_fence_rep __user
		*user_fence_rep,
		struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
		struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
		struct vmw_private *dev_priv,
		struct vmw_fence_obj **p_fence,
		uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
		struct vmw_fpriv *vmw_fp,
		int ret,
		struct drm_vmw_fence_rep __user
		*user_fence_rep,
		struct vmw_fence_obj *fence,
		uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
		struct ttm_buffer_object *bo,
		bool interruptible,
		bool validate_as_mob);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */
extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
		uint32_t seqno, bool interruptible,
		unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
		uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
		bool lazy,
		bool fifo_idle,
		uint32_t seqno,
		bool interruptible,
		unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
		struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
		int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
		u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */
extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
		uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
		uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
		struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */
int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */
int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
		struct ttm_object_file *tfile,
		struct ttm_buffer_object *bo,
		SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		unsigned width, unsigned height, unsigned pitch,
		unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
		uint32_t pitch,
		uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
		struct drm_file *file_priv,
		struct vmw_framebuffer *vfb,
		struct vmw_surface *surface,
		uint32_t sid, int32_t destX, int32_t destY,
		struct drm_vmw_rect *clips,
		uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int vmw_dumb_map_offset(struct drm_file *file_priv,
		struct drm_device *dev, uint32_t handle,
		uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		struct drm_device *dev,
		uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */
int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */
extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
		struct drm_file *file_priv,
		int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
		struct drm_file *file_priv,
		uint32_t handle, uint32_t flags,
		int *prime_fd);

/*
 * Memory object (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
		const struct vmw_sg_table *vsgt,
		unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
		struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */
extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
		struct ttm_object_file *tfile,
		int id,
		struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
		SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
		bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
		struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);

/*
 * Surface management - vmwgfx_surface.c
 */
extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
		struct ttm_object_file *tfile,
		uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
		struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
		uint32_t user_accounting_size,
		uint32_t svga3d_flags,
		SVGA3dSurfaceFormat format,
		bool for_scanout,
		uint32_t num_mip_levels,
		uint32_t multisample_count,
		uint32_t array_size,
		struct drm_vmw_size size,
		struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */
extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
		struct vmw_cmdbuf_res_manager *man,
		u32 user_key, const void *bytecode,
		SVGA3dShaderType shader_type,
		size_t size,
		struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
		u32 user_key, SVGA3dShaderType shader_type,
		struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
		struct vmw_resource *ctx,
		u32 user_key,
		SVGA3dShaderType shader_type,
		struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
		struct list_head *list,
		bool readback);
extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */
extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		enum vmw_cmdbuf_res_type res_type,
		u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
		enum vmw_cmdbuf_res_type res_type,
		u32 user_key,
		struct vmw_resource *res,
		struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
		enum vmw_cmdbuf_res_type res_type,
		u32 user_key,
		struct list_head *list,
		struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
		struct vmw_resource *ctx,
		u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
		struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
		size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
		int ctx_id, bool interruptible,
		struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		struct vmw_cmdbuf_header *header,
		bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		size_t size, bool interruptible,
		struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
		bool interruptible);
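
/*
 * Illustrative sketch only ("size" is assumed to hold the size of a
 * single SVGA command, and the ERR_PTR-style error handling is an
 * assumption based on common kernel practice): allocate a header-backed
 * buffer from the command buffer manager, fill it, and commit it.
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	(write the command payload into cmd)
 *	vmw_cmdbuf_commit(man, size, header, false);
 */
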
/**
 * Inline helper functions
 */
static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;

	*srf = NULL;
	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform a MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform a MMIO write to volatile memory
 *
 * @value: The value to write
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}
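
/*
 * Illustrative sketch (not part of this header): the accessors above are
 * used on the memremap'd FIFO/register memory pointed to by
 * dev_priv->mmio_virt, indexed with SVGA_FIFO_* offsets that are assumed
 * to come from the SVGA device headers, e.g.:
 *
 *	u32 *fifo_mem = dev_priv->mmio_virt;
 *	u32 fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 */
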
#endif /* _VMWGFX_DRV_H_ */