vmwgfx_bo.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"

/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
        struct ttm_prime_object prime;
        struct vmw_buffer_object vbo;
};

/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_buffer_object, base);
}

/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
 * object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
                            struct vmw_buffer_object *buf,
                            struct ttm_placement *placement,
                            bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, placement, &ctx);

        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);

err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                              struct vmw_buffer_object *buf,
                              bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0) {
                ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
                goto out_unreserve;
        }

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto out_unreserve;

        ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *buf,
                       bool interruptible)
{
        return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
                                       interruptible);
}
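
/*
 * Example (illustrative sketch, not taken from the driver): the pin helpers
 * above are meant to be paired with vmw_bo_unpin() once the VRAM access is
 * done.  "access_vram_contents()" is a hypothetical placeholder for whatever
 * the caller does with the pinned buffer.
 *
 *      ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *      if (ret)
 *              return ret;
 *
 *      access_vram_contents(buf);
 *
 *      (void) vmw_bo_unpin(dev_priv, buf, false);
 */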

/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                                struct vmw_buffer_object *buf,
                                bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;
        uint32_t new_flags;

        place = vmw_vram_placement.placement[0];
        place.lpfn = bo->num_pages;
        placement.num_placement = 1;
        placement.placement = &place;
        placement.num_busy_placement = 1;
        placement.busy_placement = &place;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err_unlock;

        /*
         * Is this buffer already in vram but not at the start of it?
         * In that case, evict it first because TTM isn't good at handling
         * that situation.
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0 &&
            buf->pin_count == 0) {
                ctx.interruptible = false;
                (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
        }

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(&placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, &placement, &ctx);

        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err_unlock:
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
                 struct vmw_buffer_object *buf,
                 bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_pin_reserved(buf, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
                          SVGAGuestPtr *ptr)
{
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
                ptr->offset = bo->offset;
        } else {
                ptr->gmrId = bo->mem.start;
                ptr->offset = 0;
        }
}

/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
        struct ttm_operation_ctx ctx = { false, true };
        struct ttm_place pl;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo = &vbo->base;
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;

        lockdep_assert_held(&bo->resv->lock.base);

        if (pin) {
                if (vbo->pin_count++ > 0)
                        return;
        } else {
                WARN_ON(vbo->pin_count <= 0);
                if (--vbo->pin_count > 0)
                        return;
        }

        pl.fpfn = 0;
        pl.lpfn = 0;
        pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
                | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        if (pin)
                pl.flags |= TTM_PL_FLAG_NO_EVICT;

        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
        placement.placement = &pl;

        ret = ttm_bo_validate(bo, &placement, &ctx);

        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
        struct ttm_buffer_object *bo = &vbo->base;
        bool not_used;
        void *virtual;
        int ret;

        virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
        if (virtual)
                return virtual;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);

        return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
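
/*
 * Example (illustrative sketch, not taken from the driver): a typical use of
 * the cached map on a buffer that is already pinned or reserved.  "src" and
 * "size" are hypothetical; no explicit vmw_bo_unmap() is needed in the common
 * case since the cached map is torn down automatically on move, swapout or
 * destruction.
 *
 *      void *virtual = vmw_bo_map_and_cache(vbo);
 *
 *      if (!virtual)
 *              return -ENOMEM;
 *      memcpy(virtual, src, size);
 */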

/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
        if (vbo->map.bo == NULL)
                return;

        ttm_bo_kunmap(&vbo->map);
}

/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
                              bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_buffer_object));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
                        TTM_OBJ_EXTRA_SIZE;
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}

/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        vmw_bo_unmap(vmw_bo);
        kfree(vmw_bo);
}

/**
 * vmw_user_bo_destroy - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);

        vmw_bo_unmap(&vmw_user_bo->vbo);
        ttm_prime_object_kfree(vmw_user_bo, prime);
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
                struct vmw_buffer_object *vmw_bo,
                size_t size, struct ttm_placement *placement,
                bool interruptible,
                void (*bo_free)(struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_bo_destroy);

        WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

        acc_size = vmw_bo_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible, acc_size,
                          NULL, NULL, bo_free);
        return ret;
}

/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        bo = &vmw_user_bo->vbo.base;
        ttm_bo_unref(&bo);
}

/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
                                        enum ttm_ref_type ref_type)
{
        struct vmw_user_buffer_object *user_bo;

        user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->vbo.base);
                break;
        default:
                WARN_ONCE(true, "Undefined buffer object reference release.\n");
        }
}

/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a refcounted pointer to the TTM base object
 * should be assigned, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t size,
                      bool shareable,
                      uint32_t *handle,
                      struct vmw_buffer_object **p_vbo,
                      struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(!user_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
                          (dev_priv->has_mob) ?
                          &vmw_sys_placement :
                          &vmw_vram_sys_placement, true,
                          &vmw_user_bo_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->vbo.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_bo_release,
                                    &vmw_user_bo_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_vbo = &user_bo->vbo;
        if (p_base) {
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
        }
        *handle = user_bo->prime.base.handle;

out_no_base_object:
        return ret;
}

/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                              struct ttm_object_file *tfile)
{
        struct vmw_user_buffer_object *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_bo_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_buffer_object(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
                                    struct ttm_object_file *tfile,
                                    uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->vbo.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;

                lret = reservation_object_wait_timeout_rcu
                        (bo->resv, true, true,
                         nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->vbo.base);

        return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
                                       struct ttm_object_file *tfile,
                                       uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_buffer_object *vbo;
        struct vmw_user_buffer_object *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_base_object *buffer_base;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
                                         &buffer_base);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(vbo, struct vmw_user_buffer_object,
                                       vbo);
                ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_bo_unreference(&vbo);
                ttm_base_object_unref(&buffer_base);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
                                                  arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}

/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_buffer_object *vbo;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                req->size, false, &handle, &vbo,
                                NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_bo_unreference(&vbo);

out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, Negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
                       uint32_t handle, struct vmw_buffer_object **out,
                       struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
        if (p_base)
                *p_base = base;
        else
                ttm_base_object_unref(&base);
        *out = &vmw_user_bo->vbo;

        return 0;
}
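
/*
 * Example (illustrative sketch, not taken from the driver): both references
 * returned by vmw_user_bo_lookup() must eventually be dropped again, the
 * buffer object with vmw_bo_unreference() and the base object with
 * ttm_base_object_unref(), as the synccpu ioctl above does.
 * "use_buffer()" is a hypothetical placeholder.
 *
 *      ret = vmw_user_bo_lookup(tfile, handle, &vbo, &base);
 *      if (ret)
 *              return ret;
 *
 *      use_buffer(vbo);
 *
 *      vmw_bo_unreference(&vbo);
 *      ttm_base_object_unref(&base);
 */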

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_bo and returns a pointer to the
 * struct vmw_buffer_object it derives from without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping
 * or scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-ESRCH);
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_noref_release();
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        return &vmw_user_bo->vbo;
}
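
/*
 * Example (illustrative sketch, not taken from the driver): the
 * non-refcounting lookup must be paired with vmw_user_bo_noref_release()
 * (declared elsewhere in the driver), with no sleeping or scheduling in
 * between the two calls.
 *
 *      vbo = vmw_user_bo_noref_lookup(tfile, handle);
 *      if (IS_ERR(vbo))
 *              return PTR_ERR(vbo);
 *
 *      // non-sleeping use of vbo only
 *
 *      vmw_user_bo_noref_release();
 */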

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, Negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
                          struct vmw_buffer_object *vbo,
                          uint32_t *handle)
{
        struct vmw_user_buffer_object *user_bo;

        if (vbo->base.destroy != vmw_user_bo_destroy)
                return -EINVAL;

        user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

        *handle = user_bo->prime.base.handle;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL, false);
}

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                reservation_object_add_excl_fence(bo->resv, &fence->base);
                dma_fence_put(&fence->base);
        } else
                reservation_object_add_excl_fence(bo->resv, &fence->base);
}
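
/*
 * Example (illustrative sketch, not taken from the driver): the usual
 * pattern is to reserve the buffer, submit the commands that touch it,
 * fence it, and only then unreserve.  Passing a NULL fence lets
 * vmw_bo_fence_single() insert one from the command stream.
 *
 *      ret = ttm_bo_reserve(bo, true, false, NULL);
 *      if (ret)
 *              return ret;
 *
 *      // ... submit device commands referencing bo ...
 *
 *      vmw_bo_fence_single(bo, NULL);
 *      ttm_bo_unreserve(bo);
 */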

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_buffer_object *vbo;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                args->size, false, &args->handle,
                                &vbo, NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        vmw_bo_unreference(&vbo);
out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_buffer_object *out_buf;
        int ret;

        ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_bo_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        /* Kill any cached kernel maps before swapout */
        vmw_bo_unmap(vmw_buffer_object(bo));
}

/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                        struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *vbo;

        if (mem == NULL)
                return;

        /* Make sure @bo is embedded in a struct vmw_buffer_object. */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        vbo = container_of(bo, struct vmw_buffer_object, base);

        /*
         * Kill any cached kernel maps before move to or from VRAM.
         * With other types of moves, the underlying pages stay the same,
         * and the map can be kept.
         */
        if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
                vmw_bo_unmap(vbo);

        /*
         * If we're moving a backup MOB out of MOB placement, then make sure we
         * read back all resource content first, and unbind the MOB from
         * the resource.
         */
        if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
}