amdgpu_mn.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"

struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct rw_semaphore	lock;
	struct rb_root		objects;
};

struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

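/*
 * Illustration (not part of the original file): one amdgpu_mn exists per
 * (device, mm) pair. Its interval tree maps user address ranges to
 * amdgpu_mn_node entries, each carrying the list of BOs backed by that
 * range:
 *
 *   amdgpu_mn (per device/mm)
 *     objects: interval tree of amdgpu_mn_node
 *       [it.start, it.last] -> bos: list of amdgpu_bo, linked via mn_list
 *
 * Overlapping registrations are merged into a single node by
 * amdgpu_mn_register() below.
 */
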
/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	down_write(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	up_write(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);

	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 * @node: the node with the BOs to unmap
 * @start: start of the affected address range
 * @end: end of the affected address range
 *
 * We block for all BOs and unmap them by moving them
 * into the system domain again.
 */
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
				      unsigned long start,
				      unsigned long end)
{
	struct amdgpu_bo *bo;
	long r;

	list_for_each_entry(bo, &node->bos, mn_list) {

		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
			continue;

		r = amdgpu_bo_reserve(bo, true);
		if (r) {
			DRM_ERROR("(%ld) failed to reserve user bo\n", r);
			continue;
		}

		/* wait for any pending GPU access to finish */
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
			true, false, MAX_SCHEDULE_TIMEOUT);
		if (r <= 0)
			DRM_ERROR("(%ld) failed to wait for user bo\n", r);

		/* flag the user pages dirty and accessed before the MM
		 * takes them away
		 */
		amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);

		amdgpu_bo_unreserve(bo);
	}
}

/**
 * amdgpu_mn_invalidate_page - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @address: address of the invalidated page
 *
 * Invalidation of a single page. Blocks for all BOs mapping it
 * and unmaps them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long address)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	down_read(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, address, address);
	if (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		amdgpu_mn_invalidate_node(node, address, address);
	}

	up_read(&rmn->lock);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	down_read(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		amdgpu_mn_invalidate_node(node, start, end);
	}

	up_read(&rmn->lock);
}

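/*
 * Worked example (illustrative only): an invalidation of the single page
 * at 0x1000 on a 4K-page system arrives as start = 0x1000, end = 0x2000.
 * The interval tree operates on inclusive ranges, so after the "end -= 1"
 * adjustment the lookup above covers [0x1000, 0x1fff] and matches exactly
 * the nodes overlapping that page.
 */
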
static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_page = amdgpu_mn_invalidate_page,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *rmn;
	int r;

	/* adev->mn_lock protects mn_hash; the mmap_sem write lock is
	 * required by __mmu_notifier_register()
	 */
	mutex_lock(&adev->mn_lock);
	if (down_write_killable(&mm->mmap_sem)) {
		mutex_unlock(&adev->mn_lock);
		return ERR_PTR(-EINTR);
	}

	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	init_rwsem(&rmn->lock);
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);

	return rmn;

free_rmn:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	down_write(&rmn->lock);

	/* merge all nodes overlapping [addr, end] into one, collecting
	 * their BOs and widening the interval as we go
	 */
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			up_write(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	up_write(&rmn->lock);

	return 0;
}

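/*
 * Usage sketch (an assumption for illustration, not part of this file):
 * a userptr creation path would register the BO once the user address is
 * known and drop the BO again if tracking cannot be set up. The helper
 * name example_setup_userptr() is hypothetical; amdgpu_mn_register() is
 * defined above and amdgpu_bo_unref() is the regular amdgpu BO release
 * function.
 */
static int example_setup_userptr(struct amdgpu_bo *bo, unsigned long addr)
{
	int r;

	/* start tracking CPU mapping changes for [addr, addr + BO size) */
	r = amdgpu_mn_register(bo, addr);
	if (r) {
		/* tracking failed, release our reference to the BO */
		amdgpu_bo_unref(&bo);
		return r;
	}

	return 0;
}
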
/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *rmn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);

	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	down_write(&rmn->lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del_init(&bo->mn_list);

	/* if this was the node's last BO, drop the node as well */
	if (list_empty(head)) {
		struct amdgpu_mn_node *node;

		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	up_write(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
}
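
/*
 * Teardown sketch (illustrative assumption, mirroring the setup sketch
 * above): the BO destruction path is expected to unregister before the BO
 * is freed, so the notifier never sees a dangling mn_list entry. The name
 * example_destroy_userptr() is hypothetical.
 */
static void example_destroy_userptr(struct amdgpu_bo *bo)
{
	/* safe even if the BO was never registered: bo->mn == NULL is
	 * handled by the early return in amdgpu_mn_unregister()
	 */
	amdgpu_mn_unregister(bo);
	amdgpu_bo_unref(&bo);
}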