vmwgfx_ioctl.c

/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
#include "device_include/svga3d_caps.h"

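/*
 * Compatibility layout used to report 3D capabilities to clients that have
 * not declared themselves guest-backed aware: a single DEVCAPS record header
 * followed by (index, value) capability pairs.
 */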
struct svga_3d_compat_cap {
        SVGA3dCapsRecordHeader header;
        SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};

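/**
 * vmw_getparam_ioctl - Handle DRM_VMW_GET_PARAM requests
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_getparam_arg.
 * @file_priv: Pointer to the calling file private.
 *
 * Reports driver and device properties such as the number of overlay
 * streams, 3D availability and the size of the 3D capability blob.
 * Querying DRM_VMW_PARAM_MAX_MOB_MEMORY also marks the client as
 * guest-backed aware. Returns 0 on success, -EINVAL on an unknown parameter.
 */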
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_getparam_arg *param =
                (struct drm_vmw_getparam_arg *)data;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

        switch (param->param) {
        case DRM_VMW_PARAM_NUM_STREAMS:
                param->value = vmw_overlay_num_overlays(dev_priv);
                break;
        case DRM_VMW_PARAM_NUM_FREE_STREAMS:
                param->value = vmw_overlay_num_free_overlays(dev_priv);
                break;
        case DRM_VMW_PARAM_3D:
                param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
                break;
        case DRM_VMW_PARAM_HW_CAPS:
                param->value = dev_priv->capabilities;
                break;
        case DRM_VMW_PARAM_FIFO_CAPS:
                param->value = dev_priv->fifo.capabilities;
                break;
        case DRM_VMW_PARAM_MAX_FB_SIZE:
                param->value = dev_priv->prim_bb_mem;
                break;
        case DRM_VMW_PARAM_FIFO_HW_VERSION:
        {
                u32 __iomem *fifo_mem = dev_priv->mmio_virt;
                const struct vmw_fifo_state *fifo = &dev_priv->fifo;

                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
                        param->value = SVGA3D_HWVERSION_WS8_B1;
                        break;
                }

                param->value =
                        ioread32(fifo_mem +
                                 ((fifo->capabilities &
                                   SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                                  SVGA_FIFO_3D_HWVERSION_REVISED :
                                  SVGA_FIFO_3D_HWVERSION));
                break;
        }
        case DRM_VMW_PARAM_MAX_SURF_MEMORY:
                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
                    !vmw_fp->gb_aware)
                        param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
                else
                        param->value = dev_priv->memory_size;
                break;
        case DRM_VMW_PARAM_3D_CAPS_SIZE:
                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
                    vmw_fp->gb_aware)
                        param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
                else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
                        param->value = sizeof(struct svga_3d_compat_cap) +
                                sizeof(uint32_t);
                else
                        param->value = (SVGA_FIFO_3D_CAPS_LAST -
                                        SVGA_FIFO_3D_CAPS + 1) *
                                sizeof(uint32_t);
                break;
        case DRM_VMW_PARAM_MAX_MOB_MEMORY:
                vmw_fp->gb_aware = true;
                param->value = dev_priv->max_mob_pages * PAGE_SIZE;
                break;
        case DRM_VMW_PARAM_MAX_MOB_SIZE:
                param->value = dev_priv->max_mob_size;
                break;
        case DRM_VMW_PARAM_SCREEN_TARGET:
                param->value =
                        (dev_priv->active_display_unit == vmw_du_screen_target);
                break;
        case DRM_VMW_PARAM_DX:
                param->value = dev_priv->has_dx;
                break;
        default:
                DRM_ERROR("Illegal vmwgfx get param request: %d\n",
                          param->param);
                return -EINVAL;
        }

        return 0;
}

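/**
 * vmw_fill_compat_cap - Build a legacy-format 3D caps record
 *
 * @dev_priv: Pointer to a device private struct.
 * @bounce: Buffer to fill with the caps record.
 * @size: Size of the buffer in bytes.
 *
 * Reads device capabilities through the SVGA_REG_DEV_CAP register and packs
 * them into a struct svga_3d_compat_cap record, clamping the number of pairs
 * to what fits in @size. Returns 0 on success or -EINVAL if the buffer is
 * too small to hold even the record header.
 */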
static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
                               size_t size)
{
        struct svga_3d_compat_cap *compat_cap =
                (struct svga_3d_compat_cap *) bounce;
        unsigned int i;
        size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
        unsigned int max_size;

        if (size < pair_offset)
                return -EINVAL;

        max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
        if (max_size > SVGA3D_DEVCAP_MAX)
                max_size = SVGA3D_DEVCAP_MAX;

        compat_cap->header.length =
                (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
        compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;

        spin_lock(&dev_priv->cap_lock);
        for (i = 0; i < max_size; ++i) {
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                compat_cap->pairs[i][0] = i;
                compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
        }
        spin_unlock(&dev_priv->cap_lock);

        return 0;
}

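/**
 * vmw_get_cap_3d_ioctl - Handle DRM_VMW_GET_3D_CAP requests
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_get_3d_cap_arg.
 * @file_priv: Pointer to the calling file private.
 *
 * Copies the 3D capability information to a user-space buffer, choosing the
 * layout based on whether the device uses guest-backed objects and whether
 * the client has declared itself guest-backed aware.
 */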
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_vmw_get_3d_cap_arg *arg =
                (struct drm_vmw_get_3d_cap_arg *) data;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t size;
        u32 __iomem *fifo_mem;
        void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
        void *bounce;
        int ret;
        bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

        if (unlikely(arg->pad64 != 0)) {
                DRM_ERROR("Illegal GET_3D_CAP argument.\n");
                return -EINVAL;
        }

        if (gb_objects && vmw_fp->gb_aware)
                size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
        else if (gb_objects)
                size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
        else
                size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
                        sizeof(uint32_t);

        if (arg->max_size < size)
                size = arg->max_size;

        bounce = vzalloc(size);
        if (unlikely(bounce == NULL)) {
                DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
                return -ENOMEM;
        }

        if (gb_objects && vmw_fp->gb_aware) {
                int i, num;
                uint32_t *bounce32 = (uint32_t *) bounce;

                num = size / sizeof(uint32_t);
                if (num > SVGA3D_DEVCAP_MAX)
                        num = SVGA3D_DEVCAP_MAX;

                spin_lock(&dev_priv->cap_lock);
                for (i = 0; i < num; ++i) {
                        vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                        *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                }
                spin_unlock(&dev_priv->cap_lock);
        } else if (gb_objects) {
                ret = vmw_fill_compat_cap(dev_priv, bounce, size);
                if (unlikely(ret != 0))
                        goto out_err;
        } else {
                fifo_mem = dev_priv->mmio_virt;
                memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
        }

        ret = copy_to_user(buffer, bounce, size);
        if (ret)
                ret = -EFAULT;
out_err:
        vfree(bounce);

        if (unlikely(ret != 0))
                DRM_ERROR("Failed to report 3D caps info.\n");

        return ret;
}

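/**
 * vmw_present_ioctl - Handle DRM_VMW_PRESENT requests
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_present_arg.
 * @file_priv: Pointer to the calling file private.
 *
 * Copies the clip rectangles from user-space, looks up the framebuffer and
 * the surface referenced by @arg->sid, and hands the present operation off
 * to vmw_kms_present().
 */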
int vmw_present_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_present_arg *arg =
                (struct drm_vmw_present_arg *)data;
        struct vmw_surface *surface;
        struct drm_vmw_rect __user *clips_ptr;
        struct drm_vmw_rect *clips = NULL;
        struct drm_framebuffer *fb;
        struct vmw_framebuffer *vfb;
        struct vmw_resource *res;
        uint32_t num_clips;
        int ret;

        num_clips = arg->num_clips;
        clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

        if (unlikely(num_clips == 0))
                return 0;

        if (clips_ptr == NULL) {
                DRM_ERROR("Variable clips_ptr must be specified.\n");
                ret = -EINVAL;
                goto out_clips;
        }

        clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
                goto out_clips;
        }

        ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
        if (ret) {
                DRM_ERROR("Failed to copy clip rects from userspace.\n");
                ret = -EFAULT;
                goto out_no_copy;
        }

        drm_modeset_lock_all(dev);

        fb = drm_framebuffer_lookup(dev, arg->fb_id);
        if (!fb) {
                DRM_ERROR("Invalid framebuffer id.\n");
                ret = -ENOENT;
                goto out_no_fb;
        }
        vfb = vmw_framebuffer_to_vfb(fb);

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
                                              user_surface_converter,
                                              &res);
        if (ret)
                goto out_no_surface;

        surface = vmw_res_to_srf(res);
        ret = vmw_kms_present(dev_priv, file_priv,
                              vfb, surface, arg->sid,
                              arg->dest_x, arg->dest_y,
                              clips, num_clips);

        /* vmw_user_surface_lookup takes one ref so does new_fb */
        vmw_surface_unreference(&surface);

out_no_surface:
        ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
        drm_framebuffer_unreference(fb);
out_no_fb:
        drm_modeset_unlock_all(dev);
out_no_copy:
        kfree(clips);
out_clips:
        return ret;
}

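/**
 * vmw_present_readback_ioctl - Handle DRM_VMW_PRESENT_READBACK requests
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_present_readback_arg.
 * @file_priv: Pointer to the calling file private.
 *
 * Copies the clip rectangles from user-space and kicks off a readback of the
 * given dmabuf-backed framebuffer through vmw_kms_readback(), forwarding the
 * user-space fence representation pointer.
 */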
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_present_readback_arg *arg =
                (struct drm_vmw_present_readback_arg *)data;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)
                (unsigned long)arg->fence_rep;
        struct drm_vmw_rect __user *clips_ptr;
        struct drm_vmw_rect *clips = NULL;
        struct drm_framebuffer *fb;
        struct vmw_framebuffer *vfb;
        uint32_t num_clips;
        int ret;

        num_clips = arg->num_clips;
        clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

        if (unlikely(num_clips == 0))
                return 0;

        if (clips_ptr == NULL) {
                DRM_ERROR("Argument clips_ptr must be specified.\n");
                ret = -EINVAL;
                goto out_clips;
        }

        clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
                goto out_clips;
        }

        ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
        if (ret) {
                DRM_ERROR("Failed to copy clip rects from userspace.\n");
                ret = -EFAULT;
                goto out_no_copy;
        }

        drm_modeset_lock_all(dev);

        fb = drm_framebuffer_lookup(dev, arg->fb_id);
        if (!fb) {
                DRM_ERROR("Invalid framebuffer id.\n");
                ret = -ENOENT;
                goto out_no_fb;
        }

        vfb = vmw_framebuffer_to_vfb(fb);
        if (!vfb->dmabuf) {
                DRM_ERROR("Framebuffer not dmabuf backed.\n");
                ret = -EINVAL;
                goto out_no_ttm_lock;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;

        ret = vmw_kms_readback(dev_priv, file_priv,
                               vfb, user_fence_rep,
                               clips, num_clips);

        ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
        drm_framebuffer_unreference(fb);
out_no_fb:
        drm_modeset_unlock_all(dev);
out_no_copy:
        kfree(clips);
out_clips:
        return ret;
}

/**
 * vmw_fops_poll - wrapper around the drm_poll function
 *
 * @filp: See the linux fops poll documentation.
 * @wait: See the linux fops poll documentation.
 *
 * Wrapper around the drm_poll function that makes sure the device is
 * processing the fifo if drm_poll decides to wait.
 */
unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct drm_file *file_priv = filp->private_data;
        struct vmw_private *dev_priv =
                vmw_priv(file_priv->minor->dev);

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        return drm_poll(filp, wait);
}

/**
 * vmw_fops_read - wrapper around the drm_read function
 *
 * @filp: See the linux fops read documentation.
 * @buffer: See the linux fops read documentation.
 * @count: See the linux fops read documentation.
 * @offset: See the linux fops read documentation.
 *
 * Wrapper around the drm_read function that makes sure the device is
 * processing the fifo if drm_read decides to wait.
 */
ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
                      size_t count, loff_t *offset)
{
        struct drm_file *file_priv = filp->private_data;
        struct vmw_private *dev_priv =
                vmw_priv(file_priv->minor->dev);

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        return drm_read(filp, buffer, count, offset);
}