vmwgfx_drv.c

/**************************************************************************
 *
 * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)
#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */
#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = \
	{DRM_IOCTL_##ioctl, flags, func}
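
/*
 * For illustration only (an expansion sketch, not extra driver code): the
 * first vmw_ioctls[] entry below,
 *
 *	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 *		      DRM_AUTH | DRM_RENDER_ALLOW)
 *
 * expands to the designated array initializer
 *
 *	[DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *		{DRM_IOCTL_VMW_GET_PARAM,
 *		 DRM_AUTH | DRM_RENDER_ALLOW, vmw_getparam_ioctl},
 *
 * so each descriptor lands at the index of its command number relative to
 * DRM_COMMAND_BASE.
 */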

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
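
/*
 * Usage sketch (not part of the driver): each parameter above can be set at
 * load time, e.g. "modprobe vmwgfx enable_fbdev=1 force_coherent=1", and,
 * since the permission bits are 0600, read or written by root at runtime
 * through /sys/module/vmwgfx/parameters/<name>.
 */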

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO(" Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO(" Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO(" Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO(" Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO(" 8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO(" Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO(" 3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO(" Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO(" Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO(" Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO(" Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO(" Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO(" GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO(" Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO(" GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO(" Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO(" Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO(" Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO(" Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO(" DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO(" HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */
	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values against the fb_max_[width|height] fields and
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, they are set to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */
		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
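
/*
 * Example (illustrative): a host that reports 640x480 is clamped up to the
 * 800x600 minimum above, while a reported size exceeding
 * fb_max_width/fb_max_height is treated as a host error and replaced by
 * that same minimum.
 */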

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(!dev_priv)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */
		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
	DRM_INFO("Atomic: %s\n",
		 (dev->driver->driver_features & DRIVER_ATOMIC) ? "yes" : "no");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (drm_is_current_master(file_priv)) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */
	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}
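
/*
 * Illustrative user-space path (not part of this file): a private vmwgfx
 * ioctl reaches vmw_generic_ioctl() through the regular ioctl(2) entry
 * points wired up below, e.g.
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	int err = ioctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg);
 *
 * where fd is an open DRM file descriptor; the flags checked above come
 * from the matching vmw_ioctls[] entry.
 */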

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(!vmaster))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */
	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 */
	vmw_kms_lost_device(dev_priv->dev);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(dev_priv->dev);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	ttm_bo_swapout_all(&dev_priv->bdev);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);
	vmw_release_device_late(dev_priv);
	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(dev_priv->dev);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");