@@ -50,7 +50,7 @@ struct amdgpu_mn {
 	struct hlist_node	node;
 
 	/* objects protected by lock */
-	struct mutex		lock;
+	struct rw_semaphore	lock;
 	struct rb_root		objects;
 };
 
@@ -74,7 +74,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
@@ -84,7 +84,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 		}
 		kfree(node);
 	}
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
@@ -160,7 +160,7 @@ static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
 	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
 
-	mutex_lock(&rmn->lock);
+	down_read(&rmn->lock);
 
 	it = interval_tree_iter_first(&rmn->objects, address, address);
 	if (it) {
@@ -170,7 +170,7 @@ static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
 		amdgpu_mn_invalidate_node(node, address, address);
 	}
 
-	mutex_unlock(&rmn->lock);
+	up_read(&rmn->lock);
 }
 
 /**
@@ -195,7 +195,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	mutex_lock(&rmn->lock);
+	down_read(&rmn->lock);
 
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
@@ -207,7 +207,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 		amdgpu_mn_invalidate_node(node, start, end);
 	}
 
-	mutex_unlock(&rmn->lock);
+	up_read(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
@@ -248,7 +248,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
-	mutex_init(&rmn->lock);
+	init_rwsem(&rmn->lock);
 	rmn->objects = RB_ROOT;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
@@ -295,7 +295,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	INIT_LIST_HEAD(&bos);
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -309,7 +309,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			mutex_unlock(&rmn->lock);
+			up_write(&rmn->lock);
 			return -ENOMEM;
 		}
 	}
@@ -324,7 +324,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 
 	return 0;
 }
@@ -350,7 +350,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		return;
 	}
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -365,6 +365,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 }
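
Note on the locking change above: the per-notifier mutex becomes an rw_semaphore because the
MMU-notifier callbacks (amdgpu_mn_invalidate_page and amdgpu_mn_invalidate_range_start) only
search the interval tree, so they can now run concurrently as readers (down_read), while
amdgpu_mn_register, amdgpu_mn_unregister and amdgpu_mn_destroy, which modify the tree, stay
exclusive writers (down_write). As a minimal sketch of the same reader/writer pattern, here is
a self-contained userspace analogue using POSIX rwlocks; every name in it (objects, invalidate,
register_object) is illustrative only and not part of the amdgpu code:

/* Userspace analogue of the mutex -> rw_semaphore conversion: readers
 * (the invalidation callbacks) may overlap with each other, writers
 * (register/unregister) are exclusive.  Build: cc demo.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER; /* ~ rmn->lock */
static int objects;                                        /* ~ rmn->objects */

/* ~ amdgpu_mn_invalidate_range_start(): read-only tree walk */
static void *invalidate(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&lock);   /* ~ down_read(&rmn->lock) */
	printf("reader sees %d object(s)\n", objects);
	pthread_rwlock_unlock(&lock);   /* ~ up_read(&rmn->lock) */
	return NULL;
}

/* ~ amdgpu_mn_register(): modifies the tree, needs exclusion */
static void *register_object(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&lock);   /* ~ down_write(&rmn->lock) */
	objects++;
	pthread_rwlock_unlock(&lock);   /* ~ up_write(&rmn->lock) */
	return NULL;
}

int main(void)
{
	pthread_t r1, r2, w;

	pthread_create(&w, NULL, register_object, NULL);
	pthread_create(&r1, NULL, invalidate, NULL); /* readers can overlap */
	pthread_create(&r2, NULL, invalidate, NULL);
	pthread_join(w, NULL);
	pthread_join(r1, NULL);
	pthread_join(r2, NULL);
	return 0;
}

The practical effect in the patch is the same split: concurrent invalidations against one
notifier no longer serialize against each other, only against tree updates.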