
drm/amdgpu: remove the userptr rmn->lock

Avoid a lock inversion problem by just using the mmap_sem to
protect the entries of the interval tree.
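
For context, the inversion looks roughly like this (a sketch; the
call chains are inferred, not spelled out by this patch):

  /*
   * Path A (amdgpu_mn_register and friends), old code:
   *   mutex_lock(&rmn->lock);     rmn->lock taken first, while the
   *   ...                         mm code may later need mmap_sem
   *
   * Path B (amdgpu_mn_invalidate_range_start), old code:
   *   entered with mmap_sem already held by the core MM,
   *   mutex_lock(&rmn->lock);     mmap_sem first, rmn->lock second
   *
   * A classic AB-BA inversion; protecting rmn->objects with
   * mmap_sem itself leaves only one lock in play.
   */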

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Christian König, 9 years ago
commit c41d271d75
1 changed file with 12 additions and 20 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c (+12 −20)

@@ -48,8 +48,7 @@ struct amdgpu_mn {
 	/* protected by adev->mn_lock */
 	struct hlist_node	node;
 
-	/* objects protected by lock */
-	struct mutex		lock;
+	/* objects protected by mm->mmap_sem */
 	struct rb_root		objects;
 };
 
@@ -72,8 +71,8 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_mn_node *node, *next_node;
 	struct amdgpu_bo *bo, *next_bo;
 
+	down_write(&rmn->mm->mmap_sem);
 	mutex_lock(&adev->mn_lock);
-	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
@@ -85,8 +84,8 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 		}
 		kfree(node);
 	}
-	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
+	up_write(&rmn->mm->mmap_sem);
 	mmu_notifier_unregister(&rmn->mn, rmn->mm);
 	kfree(rmn);
 }
@@ -129,8 +128,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	mutex_lock(&rmn->lock);
-
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
@@ -165,8 +162,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 			amdgpu_bo_unreserve(bo);
 		}
 	}
-
-	mutex_unlock(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
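
Dropping rmn->lock from the invalidation path adds no replacement
lock; the premise (implied by the new "objects protected by
mm->mmap_sem" comment, not stated in this hunk) is:

  /*
   * amdgpu_mn_invalidate_range_start() is reached from the core MM
   * with mmap_sem for this mm already held, so the interval tree
   * walk is serialized against the writers, which now all take
   * down_write(&rmn->mm->mmap_sem).
   */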
@@ -203,7 +198,6 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
-	mutex_init(&rmn->lock);
 	rmn->objects = RB_ROOT;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
@@ -250,7 +244,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	INIT_LIST_HEAD(&bos);
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->mm->mmap_sem);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -264,7 +258,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			mutex_unlock(&rmn->lock);
+			up_write(&rmn->mm->mmap_sem);
 			return -ENOMEM;
 		}
 	}
@@ -279,7 +273,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->mm->mmap_sem);
 
 	return 0;
 }
@@ -294,17 +288,15 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = bo->adev;
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *rmn = bo->mn;
 	struct list_head *head;
 
-	mutex_lock(&adev->mn_lock);
-	rmn = bo->mn;
-	if (rmn == NULL) {
-		mutex_unlock(&adev->mn_lock);
+	if (rmn == NULL)
 		return;
-	}
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&adev->mn_lock);
+
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
 
@@ -318,6 +310,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
+	up_write(&rmn->mm->mmap_sem);
 }
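
The net effect on lock ordering, summarized from the hunks above (a
reading aid, not part of the patch):

  /*
   * Every path that takes both locks (amdgpu_mn_destroy,
   * amdgpu_mn_unregister) now takes them in one consistent order:
   *
   *   down_write(&rmn->mm->mmap_sem);   outer, protects rmn->objects
   *   mutex_lock(&adev->mn_lock);       inner, protects the rmn hash
   *   ...
   *   mutex_unlock(&adev->mn_lock);
   *   up_write(&rmn->mm->mmap_sem);
   *
   * The notifier read side runs under mmap_sem held by the caller,
   * so rmn->lock and its inversion hazard disappear entirely.
   */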