|
@@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *mapping;
 	int r;
 
+	spin_lock(&vm->freed_lock);
 	while (!list_empty(&vm->freed)) {
 		mapping = list_first_entry(&vm->freed,
 			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
-
+		spin_unlock(&vm->freed_lock);
 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
 		kfree(mapping);
 		if (r)
 			return r;
 
+		spin_lock(&vm->freed_lock);
 	}
+	spin_unlock(&vm->freed_lock);
+
 	return 0;
 
 }
@@ -1155,10 +1159,13 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-	if (valid)
+	if (valid) {
+		spin_lock(&vm->freed_lock);
 		list_add(&mapping->list, &vm->freed);
-	else
+		spin_unlock(&vm->freed_lock);
+	} else {
 		kfree(mapping);
+	}
 
 	return 0;
 }
@@ -1191,7 +1198,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		interval_tree_remove(&mapping->it, &vm->va);
 		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
+		spin_lock(&vm->freed_lock);
 		list_add(&mapping->list, &vm->freed);
+		spin_unlock(&vm->freed_lock);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
|
@@ -1252,6 +1261,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 	spin_lock_init(&vm->it_lock);
+	spin_lock_init(&vm->freed_lock);
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);
 