amdgpu_mn.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"

struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by mm->mmap_sem */
	struct rb_root		objects;
};

struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};
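
/*
 * Illustration (added for clarity, not in the original source): one
 * amdgpu_mn context exists per (device, mm_struct) pair.  Its interval
 * tree maps userptr address ranges to amdgpu_mn_node entries, and each
 * node carries the list of BOs backing that range:
 *
 *   amdgpu_mn
 *     objects (rb_root interval tree)
 *       amdgpu_mn_node [it.start, it.last] -> bos: amdgpu_bo, amdgpu_bo, ...
 *       amdgpu_mn_node [it.start, it.last] -> bos: amdgpu_bo, ...
 *
 * Overlapping registrations are merged into a single node; see
 * amdgpu_mn_register() below.
 */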

/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	down_write(&rmn->mm->mmap_sem);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
	kfree(rmn);
}
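
/*
 * Note (added for clarity, not in the original source): destruction is
 * deferred to a work item because amdgpu_mn_release() below runs inside
 * the mmu_notifier ->release() callback, where tearing the notifier down
 * directly would be unsafe; mmu_notifier_unregister_no_release() is
 * instead called from ordinary process context in the work handler.
 */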

/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);

	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;
		long r;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
							  end))
				continue;

			r = amdgpu_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			amdgpu_bo_unreserve(bo);
		}
	}
}
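
/*
 * Flow sketch (added for illustration, not in the original source): the
 * core MM invokes this hook before it changes a mapping, e.g. on munmap():
 *
 *   munmap(addr, size)
 *     -> invalidate_range_start(mn, mm, addr, addr + size)
 *        -> for each BO overlapping [addr, addr + size - 1]:
 *               wait for pending GPU work on the BO's reservation object,
 *               then move the BO back to AMDGPU_GEM_DOMAIN_CPU so the GPU
 *               mapping is torn down
 *
 * Note the conversion from the half-open range the MM passes in to the
 * inclusive interval the interval tree expects (end -= 1).
 */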

static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *rmn;
	int r;

	mutex_lock(&adev->mn_lock);
	down_write(&mm->mmap_sem);

	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);

	return rmn;

free_rmn:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	kfree(rmn);

	return ERR_PTR(r);
}
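
/*
 * Note (added for clarity, not in the original source): adev->mn_hash is
 * keyed by the mm_struct pointer, so at most one amdgpu_mn context exists
 * per process address space per device; repeated calls from the same
 * process find and return the already-registered context.
 */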

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	down_write(&rmn->mm->mmap_sem);

	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			up_write(&rmn->mm->mmap_sem);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	up_write(&rmn->mm->mmap_sem);

	return 0;
}
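
/*
 * Usage sketch (hypothetical, not from this file): a userptr ioctl
 * implementation would register the BO right after creating it, so that a
 * later CPU-side unmap of "addr" invalidates the GPU mapping:
 *
 *   r = amdgpu_mn_register(bo, addr);
 *   if (r)
 *       goto error_free;   // "error_free" is an assumed label
 *
 * Overlapping registrations are merged: the while loop above removes every
 * existing node intersecting [addr, end], widens the interval to cover
 * them all and splices their BO lists into a single node.
 */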

/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	down_write(&rmn->mm->mmap_sem);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

	if (list_empty(head)) {
		struct amdgpu_mn_node *node;
		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
}
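
/*
 * Note (added for clarity, not in the original source): the list_empty()
 * check works because each node's list head is embedded in amdgpu_mn_node.
 * After list_del(), the saved next pointer equals an empty list head only
 * if the BO was the node's last entry, in which case the now-empty node is
 * removed from the interval tree and freed.
 */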