drm_syncobj.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993
  1. /*
  2. * Copyright 2017 Red Hat
  3. * Parts ported from amdgpu (fence wait code).
  4. * Copyright 2016 Advanced Micro Devices, Inc.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice (including the next
  14. * paragraph) shall be included in all copies or substantial portions of the
  15. * Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  20. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  22. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  23. * IN THE SOFTWARE.
  24. *
  25. * Authors:
  26. *
  27. */
  28. /**
  29. * DOC: Overview
  30. *
  31. * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
  32. * persistent objects that contain an optional fence. The fence can be updated
  33. * with a new fence, or be NULL.
  34. *
  35. * syncobj's can be waited upon, where it will wait for the underlying
  36. * fence.
  37. *
  38. * syncobj's can be exported to fd's and back; these fd's are opaque and
  39. * have no other use, except passing the syncobj between processes.
  40. *
  41. * Their primary use-case is to implement Vulkan fences and semaphores.
  42. *
  43. * syncobj have a kref reference count, but also have an optional file.
  44. * The file is only created once the syncobj is exported.
  45. * The file takes a reference on the kref.
  46. */
  47. #include <drm/drmP.h>
  48. #include <linux/file.h>
  49. #include <linux/fs.h>
  50. #include <linux/anon_inodes.h>
  51. #include <linux/sync_file.h>
  52. #include <linux/sched/signal.h>
  53. #include "drm_internal.h"
  54. #include <drm/drm_syncobj.h>
  55. struct drm_syncobj_stub_fence {
  56. struct dma_fence base;
  57. spinlock_t lock;
  58. };
  59. static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
  60. {
  61. return "syncobjstub";
  62. }
  63. static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
  64. .get_driver_name = drm_syncobj_stub_fence_get_name,
  65. .get_timeline_name = drm_syncobj_stub_fence_get_name,
  66. };
  67. /**
  68. * drm_syncobj_find - lookup and reference a sync object.
  69. * @file_private: drm file private pointer
  70. * @handle: sync object handle to lookup.
  71. *
  72. * Returns a reference to the syncobj pointed to by handle or NULL. The
  73. * reference must be released by calling drm_syncobj_put().
  74. */
  75. struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
  76. u32 handle)
  77. {
  78. struct drm_syncobj *syncobj;
  79. spin_lock(&file_private->syncobj_table_lock);
  80. /* Check if we currently have a reference on the object */
  81. syncobj = idr_find(&file_private->syncobj_idr, handle);
  82. if (syncobj)
  83. drm_syncobj_get(syncobj);
  84. spin_unlock(&file_private->syncobj_table_lock);
  85. return syncobj;
  86. }
  87. EXPORT_SYMBOL(drm_syncobj_find);
  88. static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
  89. struct drm_syncobj_cb *cb,
  90. drm_syncobj_func_t func)
  91. {
  92. cb->func = func;
  93. list_add_tail(&cb->node, &syncobj->cb_list);
  94. }
/*
 * Grab a reference to the syncobj's current fence, or, if none is set,
 * register @cb to fire once a fence is installed.
 *
 * Returns 1 with *fence holding a reference when a fence was present,
 * 0 with *fence == NULL when the callback was armed instead.
 */
static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						 struct dma_fence **fence,
						 struct drm_syncobj_cb *cb,
						 drm_syncobj_func_t func)
{
	int ret;

	/* Fast path: lockless fetch first. */
	*fence = drm_syncobj_fence_get(syncobj);
	if (*fence)
		return 1;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence) {
		*fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
								 lockdep_is_held(&syncobj->lock)));
		ret = 1;
	} else {
		*fence = NULL;
		drm_syncobj_add_callback_locked(syncobj, cb, func);
		ret = 0;
	}
	spin_unlock(&syncobj->lock);

	return ret;
}
  121. void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
  122. struct drm_syncobj_cb *cb,
  123. drm_syncobj_func_t func)
  124. {
  125. spin_lock(&syncobj->lock);
  126. drm_syncobj_add_callback_locked(syncobj, cb, func);
  127. spin_unlock(&syncobj->lock);
  128. }
  129. void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
  130. struct drm_syncobj_cb *cb)
  131. {
  132. spin_lock(&syncobj->lock);
  133. list_del_init(&cb->node);
  134. spin_unlock(&syncobj->lock);
  135. }
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @point: timeline point (currently unused by this implementation —
 *         NOTE(review): confirm against the timeline-syncobj series)
 * @fence: fence to install in sync file.
 *
 * This replaces the fence on a sync object, or a timeline point fence.
 * Any callbacks waiting for a fence to materialize are invoked (under
 * the syncobj lock) when the fence actually changes.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       u64 point,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct drm_syncobj_cb *cur, *tmp;

	/* Take our reference before publishing the new fence. */
	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		/* Fire every pending callback exactly once. */
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	/* Drop the old fence outside the lock; may free it. */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
  166. static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
  167. {
  168. struct drm_syncobj_stub_fence *fence;
  169. fence = kzalloc(sizeof(*fence), GFP_KERNEL);
  170. if (fence == NULL)
  171. return -ENOMEM;
  172. spin_lock_init(&fence->lock);
  173. dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
  174. &fence->lock, 0, 0);
  175. dma_fence_signal(&fence->base);
  176. drm_syncobj_replace_fence(syncobj, 0, &fence->base);
  177. dma_fence_put(&fence->base);
  178. return 0;
  179. }
  180. /**
  181. * drm_syncobj_find_fence - lookup and reference the fence in a sync object
  182. * @file_private: drm file private pointer
  183. * @handle: sync object handle to lookup.
  184. * @point: timeline point
  185. * @fence: out parameter for the fence
  186. *
  187. * This is just a convenience function that combines drm_syncobj_find() and
  188. * drm_syncobj_fence_get().
  189. *
  190. * Returns 0 on success or a negative error value on failure. On success @fence
  191. * contains a reference to the fence, which must be released by calling
  192. * dma_fence_put().
  193. */
  194. int drm_syncobj_find_fence(struct drm_file *file_private,
  195. u32 handle, u64 point,
  196. struct dma_fence **fence)
  197. {
  198. struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
  199. int ret = 0;
  200. if (!syncobj)
  201. return -ENOENT;
  202. *fence = drm_syncobj_fence_get(syncobj);
  203. if (!*fence) {
  204. ret = -EINVAL;
  205. }
  206. drm_syncobj_put(syncobj);
  207. return ret;
  208. }
  209. EXPORT_SYMBOL(drm_syncobj_find_fence);
  210. /**
  211. * drm_syncobj_free - free a sync object.
  212. * @kref: kref to free.
  213. *
  214. * Only to be called from kref_put in drm_syncobj_put.
  215. */
  216. void drm_syncobj_free(struct kref *kref)
  217. {
  218. struct drm_syncobj *syncobj = container_of(kref,
  219. struct drm_syncobj,
  220. refcount);
  221. drm_syncobj_replace_fence(syncobj, 0, NULL);
  222. kfree(syncobj);
  223. }
  224. EXPORT_SYMBOL(drm_syncobj_free);
  225. /**
  226. * drm_syncobj_create - create a new syncobj
  227. * @out_syncobj: returned syncobj
  228. * @flags: DRM_SYNCOBJ_* flags
  229. * @fence: if non-NULL, the syncobj will represent this fence
  230. *
  231. * This is the first function to create a sync object. After creating, drivers
  232. * probably want to make it available to userspace, either through
  233. * drm_syncobj_get_handle() or drm_syncobj_get_fd().
  234. *
  235. * Returns 0 on success or a negative error value on failure.
  236. */
  237. int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
  238. struct dma_fence *fence)
  239. {
  240. int ret;
  241. struct drm_syncobj *syncobj;
  242. syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
  243. if (!syncobj)
  244. return -ENOMEM;
  245. kref_init(&syncobj->refcount);
  246. INIT_LIST_HEAD(&syncobj->cb_list);
  247. spin_lock_init(&syncobj->lock);
  248. if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
  249. ret = drm_syncobj_assign_null_handle(syncobj);
  250. if (ret < 0) {
  251. drm_syncobj_put(syncobj);
  252. return ret;
  253. }
  254. }
  255. if (fence)
  256. drm_syncobj_replace_fence(syncobj, 0, fence);
  257. *out_syncobj = syncobj;
  258. return 0;
  259. }
  260. EXPORT_SYMBOL(drm_syncobj_create);
  261. /**
  262. * drm_syncobj_get_handle - get a handle from a syncobj
  263. * @file_private: drm file private pointer
  264. * @syncobj: Sync object to export
  265. * @handle: out parameter with the new handle
  266. *
  267. * Exports a sync object created with drm_syncobj_create() as a handle on
  268. * @file_private to userspace.
  269. *
  270. * Returns 0 on success or a negative error value on failure.
  271. */
  272. int drm_syncobj_get_handle(struct drm_file *file_private,
  273. struct drm_syncobj *syncobj, u32 *handle)
  274. {
  275. int ret;
  276. /* take a reference to put in the idr */
  277. drm_syncobj_get(syncobj);
  278. idr_preload(GFP_KERNEL);
  279. spin_lock(&file_private->syncobj_table_lock);
  280. ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
  281. spin_unlock(&file_private->syncobj_table_lock);
  282. idr_preload_end();
  283. if (ret < 0) {
  284. drm_syncobj_put(syncobj);
  285. return ret;
  286. }
  287. *handle = ret;
  288. return 0;
  289. }
  290. EXPORT_SYMBOL(drm_syncobj_get_handle);
  291. static int drm_syncobj_create_as_handle(struct drm_file *file_private,
  292. u32 *handle, uint32_t flags)
  293. {
  294. int ret;
  295. struct drm_syncobj *syncobj;
  296. ret = drm_syncobj_create(&syncobj, flags, NULL);
  297. if (ret)
  298. return ret;
  299. ret = drm_syncobj_get_handle(file_private, syncobj, handle);
  300. drm_syncobj_put(syncobj);
  301. return ret;
  302. }
  303. static int drm_syncobj_destroy(struct drm_file *file_private,
  304. u32 handle)
  305. {
  306. struct drm_syncobj *syncobj;
  307. spin_lock(&file_private->syncobj_table_lock);
  308. syncobj = idr_remove(&file_private->syncobj_idr, handle);
  309. spin_unlock(&file_private->syncobj_table_lock);
  310. if (!syncobj)
  311. return -EINVAL;
  312. drm_syncobj_put(syncobj);
  313. return 0;
  314. }
  315. static int drm_syncobj_file_release(struct inode *inode, struct file *file)
  316. {
  317. struct drm_syncobj *syncobj = file->private_data;
  318. drm_syncobj_put(syncobj);
  319. return 0;
  320. }
  321. static const struct file_operations drm_syncobj_file_fops = {
  322. .release = drm_syncobj_file_release,
  323. };
  324. /**
  325. * drm_syncobj_get_fd - get a file descriptor from a syncobj
  326. * @syncobj: Sync object to export
  327. * @p_fd: out parameter with the new file descriptor
  328. *
  329. * Exports a sync object created with drm_syncobj_create() as a file descriptor.
  330. *
  331. * Returns 0 on success or a negative error value on failure.
  332. */
  333. int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
  334. {
  335. struct file *file;
  336. int fd;
  337. fd = get_unused_fd_flags(O_CLOEXEC);
  338. if (fd < 0)
  339. return fd;
  340. file = anon_inode_getfile("syncobj_file",
  341. &drm_syncobj_file_fops,
  342. syncobj, 0);
  343. if (IS_ERR(file)) {
  344. put_unused_fd(fd);
  345. return PTR_ERR(file);
  346. }
  347. drm_syncobj_get(syncobj);
  348. fd_install(fd, file);
  349. *p_fd = fd;
  350. return 0;
  351. }
  352. EXPORT_SYMBOL(drm_syncobj_get_fd);
  353. static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
  354. u32 handle, int *p_fd)
  355. {
  356. struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
  357. int ret;
  358. if (!syncobj)
  359. return -EINVAL;
  360. ret = drm_syncobj_get_fd(syncobj, p_fd);
  361. drm_syncobj_put(syncobj);
  362. return ret;
  363. }
/*
 * Import a syncobj fd (previously produced by drm_syncobj_get_fd()) as a
 * handle on @file_private.  Returns 0 and stores the new handle on success,
 * or a negative error code.
 */
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct file *file;
	int ret;

	file = fget(fd);
	if (!file)
		return -EINVAL;

	/* Reject fds that are not syncobj exports. */
	if (file->f_op != &drm_syncobj_file_fops) {
		fput(file);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	/* idr ids start at 1, so ret > 0 means success; otherwise drop our ref. */
	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fput(file);
	return ret;
}
  393. static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
  394. int fd, int handle)
  395. {
  396. struct dma_fence *fence = sync_file_get_fence(fd);
  397. struct drm_syncobj *syncobj;
  398. if (!fence)
  399. return -EINVAL;
  400. syncobj = drm_syncobj_find(file_private, handle);
  401. if (!syncobj) {
  402. dma_fence_put(fence);
  403. return -ENOENT;
  404. }
  405. drm_syncobj_replace_fence(syncobj, 0, fence);
  406. dma_fence_put(fence);
  407. drm_syncobj_put(syncobj);
  408. return 0;
  409. }
  410. static int drm_syncobj_export_sync_file(struct drm_file *file_private,
  411. int handle, int *p_fd)
  412. {
  413. int ret;
  414. struct dma_fence *fence;
  415. struct sync_file *sync_file;
  416. int fd = get_unused_fd_flags(O_CLOEXEC);
  417. if (fd < 0)
  418. return fd;
  419. ret = drm_syncobj_find_fence(file_private, handle, 0, &fence);
  420. if (ret)
  421. goto err_put_fd;
  422. sync_file = sync_file_create(fence);
  423. dma_fence_put(fence);
  424. if (!sync_file) {
  425. ret = -EINVAL;
  426. goto err_put_fd;
  427. }
  428. fd_install(fd, sync_file->file);
  429. *p_fd = fd;
  430. return 0;
  431. err_put_fd:
  432. put_unused_fd(fd);
  433. return ret;
  434. }
  435. /**
  436. * drm_syncobj_open - initalizes syncobj file-private structures at devnode open time
  437. * @file_private: drm file-private structure to set up
  438. *
  439. * Called at device open time, sets up the structure for handling refcounting
  440. * of sync objects.
  441. */
  442. void
  443. drm_syncobj_open(struct drm_file *file_private)
  444. {
  445. idr_init_base(&file_private->syncobj_idr, 1);
  446. spin_lock_init(&file_private->syncobj_table_lock);
  447. }
  448. static int
  449. drm_syncobj_release_handle(int id, void *ptr, void *data)
  450. {
  451. struct drm_syncobj *syncobj = ptr;
  452. drm_syncobj_put(syncobj);
  453. return 0;
  454. }
  455. /**
  456. * drm_syncobj_release - release file-private sync object resources
  457. * @file_private: drm file-private structure to clean up
  458. *
  459. * Called at close time when the filp is going away.
  460. *
  461. * Releases any remaining references on objects by this filp.
  462. */
  463. void
  464. drm_syncobj_release(struct drm_file *file_private)
  465. {
  466. idr_for_each(&file_private->syncobj_idr,
  467. &drm_syncobj_release_handle, file_private);
  468. idr_destroy(&file_private->syncobj_idr);
  469. }
  470. int
  471. drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
  472. struct drm_file *file_private)
  473. {
  474. struct drm_syncobj_create *args = data;
  475. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  476. return -EOPNOTSUPP;
  477. /* no valid flags yet */
  478. if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
  479. return -EINVAL;
  480. return drm_syncobj_create_as_handle(file_private,
  481. &args->handle, args->flags);
  482. }
  483. int
  484. drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
  485. struct drm_file *file_private)
  486. {
  487. struct drm_syncobj_destroy *args = data;
  488. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  489. return -EOPNOTSUPP;
  490. /* make sure padding is empty */
  491. if (args->pad)
  492. return -EINVAL;
  493. return drm_syncobj_destroy(file_private, args->handle);
  494. }
  495. int
  496. drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
  497. struct drm_file *file_private)
  498. {
  499. struct drm_syncobj_handle *args = data;
  500. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  501. return -EOPNOTSUPP;
  502. if (args->pad)
  503. return -EINVAL;
  504. if (args->flags != 0 &&
  505. args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
  506. return -EINVAL;
  507. if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
  508. return drm_syncobj_export_sync_file(file_private, args->handle,
  509. &args->fd);
  510. return drm_syncobj_handle_to_fd(file_private, args->handle,
  511. &args->fd);
  512. }
  513. int
  514. drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
  515. struct drm_file *file_private)
  516. {
  517. struct drm_syncobj_handle *args = data;
  518. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  519. return -EOPNOTSUPP;
  520. if (args->pad)
  521. return -EINVAL;
  522. if (args->flags != 0 &&
  523. args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
  524. return -EINVAL;
  525. if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
  526. return drm_syncobj_import_sync_file_fence(file_private,
  527. args->fd,
  528. args->handle);
  529. return drm_syncobj_fd_to_handle(file_private, args->fd,
  530. &args->handle);
  531. }
/* Per-syncobj bookkeeping for one waiter in the wait ioctl path. */
struct syncobj_wait_entry {
	struct task_struct *task;	/* waiter to wake */
	struct dma_fence *fence;	/* current fence, NULL if not yet submitted */
	struct dma_fence_cb fence_cb;	/* fires when the fence signals */
	struct drm_syncobj_cb syncobj_cb; /* fires when a fence is installed */
};
  538. static void syncobj_wait_fence_func(struct dma_fence *fence,
  539. struct dma_fence_cb *cb)
  540. {
  541. struct syncobj_wait_entry *wait =
  542. container_of(cb, struct syncobj_wait_entry, fence_cb);
  543. wake_up_process(wait->task);
  544. }
  545. static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
  546. struct drm_syncobj_cb *cb)
  547. {
  548. struct syncobj_wait_entry *wait =
  549. container_of(cb, struct syncobj_wait_entry, syncobj_cb);
  550. /* This happens inside the syncobj lock */
  551. wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
  552. lockdep_is_held(&syncobj->lock)));
  553. wake_up_process(wait->task);
  554. }
/*
 * Wait on @count syncobjs until one (or, with WAIT_ALL, all) of their fences
 * signal, the @timeout (in jiffies) expires, or a signal is pending.
 *
 * Returns the remaining timeout on success, 0/-ETIME on timeout, -EINVAL if
 * a syncobj has no fence and WAIT_FOR_SUBMIT was not set, -ERESTARTSYS on
 * signal, or -ENOMEM.  On success with a wait-any, *idx is the index of the
 * first signaled syncobj.
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				/* no fence yet: a syncobj callback is armed below */
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Fast path: everything needed is already signaled. */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fallthough and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		/* Arm a callback per syncobj so a late fence install wakes us. */
		for (i = 0; i < count; ++i) {
			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() returning non-zero means the
			 * fence was already signaled when we tried to arm it.
			 */
			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			/* If we are doing a 0 timeout wait and we got
			 * here, then we just timed out.
			 */
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return timeout;
}
  657. /**
  658. * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
  659. *
  660. * @timeout_nsec: timeout nsec component in ns, 0 for poll
  661. *
  662. * Calculate the timeout in jiffies from an absolute time in sec/nsec.
  663. */
  664. static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
  665. {
  666. ktime_t abs_timeout, now;
  667. u64 timeout_ns, timeout_jiffies64;
  668. /* make 0 timeout means poll - absolute 0 doesn't seem valid */
  669. if (timeout_nsec == 0)
  670. return 0;
  671. abs_timeout = ns_to_ktime(timeout_nsec);
  672. now = ktime_get();
  673. if (!ktime_after(abs_timeout, now))
  674. return 0;
  675. timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
  676. timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
  677. /* clamp timeout to avoid infinite timeout */
  678. if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
  679. return MAX_SCHEDULE_TIMEOUT - 1;
  680. return timeout_jiffies64 + 1;
  681. }
  682. static int drm_syncobj_array_wait(struct drm_device *dev,
  683. struct drm_file *file_private,
  684. struct drm_syncobj_wait *wait,
  685. struct drm_syncobj **syncobjs)
  686. {
  687. signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
  688. uint32_t first = ~0;
  689. timeout = drm_syncobj_array_wait_timeout(syncobjs,
  690. wait->count_handles,
  691. wait->flags,
  692. timeout, &first);
  693. if (timeout < 0)
  694. return timeout;
  695. wait->first_signaled = first;
  696. return 0;
  697. }
  698. static int drm_syncobj_array_find(struct drm_file *file_private,
  699. void __user *user_handles,
  700. uint32_t count_handles,
  701. struct drm_syncobj ***syncobjs_out)
  702. {
  703. uint32_t i, *handles;
  704. struct drm_syncobj **syncobjs;
  705. int ret;
  706. handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
  707. if (handles == NULL)
  708. return -ENOMEM;
  709. if (copy_from_user(handles, user_handles,
  710. sizeof(uint32_t) * count_handles)) {
  711. ret = -EFAULT;
  712. goto err_free_handles;
  713. }
  714. syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
  715. if (syncobjs == NULL) {
  716. ret = -ENOMEM;
  717. goto err_free_handles;
  718. }
  719. for (i = 0; i < count_handles; i++) {
  720. syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
  721. if (!syncobjs[i]) {
  722. ret = -ENOENT;
  723. goto err_put_syncobjs;
  724. }
  725. }
  726. kfree(handles);
  727. *syncobjs_out = syncobjs;
  728. return 0;
  729. err_put_syncobjs:
  730. while (i-- > 0)
  731. drm_syncobj_put(syncobjs[i]);
  732. kfree(syncobjs);
  733. err_free_handles:
  734. kfree(handles);
  735. return ret;
  736. }
  737. static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
  738. uint32_t count)
  739. {
  740. uint32_t i;
  741. for (i = 0; i < count; i++)
  742. drm_syncobj_put(syncobjs[i]);
  743. kfree(syncobjs);
  744. }
  745. int
  746. drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
  747. struct drm_file *file_private)
  748. {
  749. struct drm_syncobj_wait *args = data;
  750. struct drm_syncobj **syncobjs;
  751. int ret = 0;
  752. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  753. return -EOPNOTSUPP;
  754. if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
  755. DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
  756. return -EINVAL;
  757. if (args->count_handles == 0)
  758. return -EINVAL;
  759. ret = drm_syncobj_array_find(file_private,
  760. u64_to_user_ptr(args->handles),
  761. args->count_handles,
  762. &syncobjs);
  763. if (ret < 0)
  764. return ret;
  765. ret = drm_syncobj_array_wait(dev, file_private,
  766. args, syncobjs);
  767. drm_syncobj_array_free(syncobjs, args->count_handles);
  768. return ret;
  769. }
  770. int
  771. drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
  772. struct drm_file *file_private)
  773. {
  774. struct drm_syncobj_array *args = data;
  775. struct drm_syncobj **syncobjs;
  776. uint32_t i;
  777. int ret;
  778. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  779. return -EOPNOTSUPP;
  780. if (args->pad != 0)
  781. return -EINVAL;
  782. if (args->count_handles == 0)
  783. return -EINVAL;
  784. ret = drm_syncobj_array_find(file_private,
  785. u64_to_user_ptr(args->handles),
  786. args->count_handles,
  787. &syncobjs);
  788. if (ret < 0)
  789. return ret;
  790. for (i = 0; i < args->count_handles; i++)
  791. drm_syncobj_replace_fence(syncobjs[i], 0, NULL);
  792. drm_syncobj_array_free(syncobjs, args->count_handles);
  793. return 0;
  794. }
  795. int
  796. drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
  797. struct drm_file *file_private)
  798. {
  799. struct drm_syncobj_array *args = data;
  800. struct drm_syncobj **syncobjs;
  801. uint32_t i;
  802. int ret;
  803. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  804. return -EOPNOTSUPP;
  805. if (args->pad != 0)
  806. return -EINVAL;
  807. if (args->count_handles == 0)
  808. return -EINVAL;
  809. ret = drm_syncobj_array_find(file_private,
  810. u64_to_user_ptr(args->handles),
  811. args->count_handles,
  812. &syncobjs);
  813. if (ret < 0)
  814. return ret;
  815. for (i = 0; i < args->count_handles; i++) {
  816. ret = drm_syncobj_assign_null_handle(syncobjs[i]);
  817. if (ret < 0)
  818. break;
  819. }
  820. drm_syncobj_array_free(syncobjs, args->count_handles);
  821. return ret;
  822. }