drm_syncobj.c

/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj) are persistent objects that contain
 * an optional fence. The fence can be replaced with a new fence, or set to
 * NULL.
 *
 * A syncobj can be waited upon, in which case the wait is performed on the
 * underlying fence.
 *
 * A syncobj can be exported to a file descriptor and back. These fds are
 * opaque and have no use other than passing the syncobj between processes.
 *
 * Their primary use-case is to implement Vulkan fences and semaphores.
 *
 * A syncobj has a kref reference count, and also an optional file. The file
 * is only created once the syncobj is exported. The file takes a reference
 * on the kref.
 */
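
/*
 * A minimal usage sketch (illustrative, not part of the original file): a
 * driver's command-submission path could look up a syncobj handle supplied
 * by userspace and attach the fence of the submitted job to it, using
 * drm_syncobj_find(), drm_syncobj_replace_fence() and drm_syncobj_put().
 * "out_sync_handle" and "job->done_fence" are hypothetical driver-side
 * names, not anything defined in this file.
 *
 *	struct drm_syncobj *syncobj;
 *
 *	syncobj = drm_syncobj_find(file_private, args->out_sync_handle);
 *	if (!syncobj)
 *		return -ENOENT;
 *
 *	drm_syncobj_replace_fence(syncobj, job->done_fence);
 *	drm_syncobj_put(syncobj);
 */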

#include <drm/drmP.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>

#include "drm_internal.h"
#include <drm/drm_syncobj.h>

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL.
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
					    struct drm_syncobj_cb *cb,
					    drm_syncobj_func_t func)
{
	cb->func = func;
	list_add_tail(&cb->node, &syncobj->cb_list);
}

/**
 * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
 * @syncobj: Sync object to which to add the callback
 * @cb: Callback to add
 * @func: Func to use when initializing the drm_syncobj_cb struct
 *
 * This adds a callback to be called next time the fence is replaced
 */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
			      struct drm_syncobj_cb *cb,
			      drm_syncobj_func_t func)
{
	spin_lock(&syncobj->lock);
	drm_syncobj_add_callback_locked(syncobj, cb, func);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_add_callback);

/**
 * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
 * @syncobj: Sync object from which to remove the callback
 * @cb: Callback to remove
 */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
				 struct drm_syncobj_cb *cb)
{
	spin_lock(&syncobj->lock);
	list_del_init(&cb->node);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_remove_callback);

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in sync object.
 *
 * This replaces the fence on a sync object. If the new fence differs from
 * the old one, any callbacks registered on the sync object are called and
 * removed.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct drm_syncobj_cb *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = syncobj->fence;
	syncobj->fence = fence;

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @fence: out parameter for the fence
 *
 * Looks up the sync object identified by @handle and returns a reference to
 * its current fence in @fence; the caller must drop it with dma_fence_put().
 * Returns 0 on success, -ENOENT if the handle is invalid, or -EINVAL if the
 * sync object has no fence attached.
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret = 0;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);
	if (!*fence)
		ret = -EINVAL;

	drm_syncobj_put(syncobj);
	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

static int drm_syncobj_create(struct drm_file *file_private,
			      u32 *handle)
{
	int ret;
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};

static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
{
	struct file *file = anon_inode_getfile("syncobj_file",
					       &drm_syncobj_file_fops,
					       syncobj, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);

	drm_syncobj_get(syncobj);
	if (cmpxchg(&syncobj->file, NULL, file)) {
		/* lost the race */
		fput(file);
	}

	return 0;
}

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;
	int fd;

	if (!syncobj)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		drm_syncobj_put(syncobj);
		return fd;
	}

	if (!syncobj->file) {
		ret = drm_syncobj_alloc_file(syncobj);
		if (ret)
			goto out_put_fd;
	}
	fd_install(fd, syncobj->file);
	drm_syncobj_put(syncobj);
	*p_fd = fd;
	return 0;

out_put_fd:
	put_unused_fd(fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static struct drm_syncobj *drm_syncobj_fdget(int fd)
{
	struct file *file = fget(fd);

	if (!file)
		return NULL;
	if (file->f_op != &drm_syncobj_file_fops)
		goto err;

	return file->private_data;
err:
	fput(file);
	return NULL;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
	int ret;

	if (!syncobj)
		return -EINVAL;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret < 0) {
		fput(syncobj->file);
		return ret;
	}

	*handle = ret;
	return 0;
}

int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
				       int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

int drm_syncobj_export_sync_file(struct drm_file *file_private,
				 int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;

err_put_fd:
	put_unused_fd(fd);
	return ret;
}

/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init(&file_private->syncobj_idr);
	spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects held by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* no valid flags yet */
	if (args->flags)
		return -EINVAL;

	return drm_syncobj_create(file_private,
				  &args->handle);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;

	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}
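
/*
 * Illustrative userspace sketch (an assumption for documentation purposes,
 * not part of the original file): exporting a syncobj's current fence as a
 * sync_file fd via DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD with the EXPORT_SYNC_FILE
 * flag, using libdrm's drmIoctl(). "drm_fd" and "syncobj_handle" are
 * hypothetical caller-provided values.
 *
 *	struct drm_syncobj_handle args = {
 *		.handle = syncobj_handle,
 *		.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *	};
 *	int sync_file_fd;
 *
 *	if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
 *		return -errno;
 *	sync_file_fd = args.fd;
 */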

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: absolute timeout in nanoseconds, 0 means poll
 *
 * Calculate the timeout in jiffies from an absolute timeout given in
 * nanoseconds.
 */
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* a 0 timeout means poll - absolute 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}
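
/*
 * Worked example (illustrative): if the absolute timeout lies two seconds
 * beyond ktime_get(), timeout_ns is roughly 2 * NSEC_PER_SEC,
 * nsecs_to_jiffies64() turns that into about 2 * HZ jiffies, and the +1
 * rounds up so the wait never returns earlier than requested.
 */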

static int drm_syncobj_wait_fences(struct drm_device *dev,
				   struct drm_file *file_private,
				   struct drm_syncobj_wait *wait,
				   struct dma_fence **fences)
{
	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
	signed long ret = 0;
	uint32_t first = ~0;

	if (wait->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
		uint32_t i;

		for (i = 0; i < wait->count_handles; i++) {
			ret = dma_fence_wait_timeout(fences[i], true, timeout);

			/* Various dma_fence wait callbacks will return
			 * ENOENT to indicate that the fence has already
			 * been signaled.  We need to sanitize this to 0 so
			 * we don't return early and the client doesn't see
			 * an unexpected error.
			 */
			if (ret == -ENOENT)
				ret = 0;

			if (ret < 0)
				return ret;
			if (ret == 0)
				break;
			timeout = ret;
		}
		first = 0;
	} else {
		ret = dma_fence_wait_any_timeout(fences,
						 wait->count_handles,
						 true, timeout,
						 &first);
	}

	if (ret < 0)
		return ret;

	wait->first_signaled = first;
	if (ret == 0)
		return -ETIME;
	return 0;
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	uint32_t *handles;
	struct dma_fence **fences;
	int ret = 0;
	uint32_t i;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	if (args->flags != 0 && args->flags != DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	/* Get the handles from userspace */
	handles = kmalloc_array(args->count_handles, sizeof(uint32_t),
				GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles,
			   u64_to_user_ptr(args->handles),
			   sizeof(uint32_t) * args->count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	fences = kcalloc(args->count_handles,
			 sizeof(struct dma_fence *), GFP_KERNEL);
	if (!fences) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < args->count_handles; i++) {
		ret = drm_syncobj_find_fence(file_private, handles[i],
					     &fences[i]);
		if (ret)
			goto err_free_fence_array;
	}

	ret = drm_syncobj_wait_fences(dev, file_private,
				      args, fences);

err_free_fence_array:
	for (i = 0; i < args->count_handles; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
err_free_handles:
	kfree(handles);

	return ret;
}