@@ -268,11 +268,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	int r = 0;
 
 	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub]) {
-		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
-		mutex_unlock(&id_mgr->lock);
-		return r;
-	}
 	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
 	if (!fences) {
 		mutex_unlock(&id_mgr->lock);
@@ -319,6 +314,13 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 	kfree(fences);
 
+	if (vm->reserved_vmid[vmhub]) {
+		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
+						     fence, job);
+		mutex_unlock(&id_mgr->lock);
+		return r;
+	}
+
 	job->vm_needs_flush = vm->use_cpu_for_update;
 	/* Check if we can use a VMID already assigned to this VM */
 	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
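
For orientation, here is a condensed sketch of amdgpu_vmid_grab() after both
hunks apply. It is not the verbatim function: the idle-VMID search between the
two hunks is summarized as comments, the parameter list beyond vm and ring is
inferred from the calls visible in the hunks, and locals such as id_mgr,
fences, id and vmhub are assumed to be set up in the elided prologue.

/* Condensed sketch of the patched control flow; elided parts marked. */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	int r = 0;

	mutex_lock(&id_mgr->lock);

	/* Gather the active fences of all VMIDs and, if none of them is
	 * idle, wait for one to become available (code between the hunks,
	 * unchanged by this patch). */
	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
	if (!fences) {
		mutex_unlock(&id_mgr->lock);
		return -ENOMEM;
	}
	/* ... idle-VMID search / wait ... */
	kfree(fences);

	/* The reserved-VMID path now runs after the idle-VMID wait instead
	 * of short-circuiting it right after mutex_lock(). */
	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
						     fence, job);
		mutex_unlock(&id_mgr->lock);
		return r;
	}

	job->vm_needs_flush = vm->use_cpu_for_update;
	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
		/* ... reuse / allocation path, unchanged ... */
	}
	/* ... remainder of the function, unchanged ... */
}

The net effect of the move is that a request holding a reserved VMID now goes
through the same wait for an idle VMID as everyone else before taking its
reserved slot, rather than returning immediately after taking the lock.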