@@ -216,6 +216,20 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 				      struct amdgpu_vm_manager_id,
 				      list);
 
+	if (id->mgr_id->active && !fence_is_signaled(id->mgr_id->active)) {
+		struct amdgpu_vm_manager_id *mgr_id, *tmp;
+		struct list_head *head = &adev->vm_manager.ids_lru;
+		list_for_each_entry_safe(mgr_id, tmp, &adev->vm_manager.ids_lru, list) {
+			if (mgr_id->active && fence_is_signaled(mgr_id->active)) {
+				list_move(&mgr_id->list, head);
+				head = &mgr_id->list;
+			}
+		}
+		id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
+					      struct amdgpu_vm_manager_id,
+					      list);
+	}
+
 	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
 	if (!r) {
 		fence_put(id->mgr_id->active);
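
For illustration only, a minimal user-space sketch of the reordering the hunk performs: when the least recently used ID is still busy (its active fence has not signaled), every ID whose fence has already signaled is shuffled to the front of the LRU, keeping its relative order, and the first entry is grabbed again. The list helpers and the vmid/fence types below are simplified stand-ins for this sketch; they are not the kernel's struct list_head, struct fence or the amdgpu structures.

#include <stdbool.h>
#include <stdio.h>

/* simplified intrusive doubly linked list, mimicking list_head semantics */
struct node {
	struct node *prev, *next;
};

struct vmid {
	struct node list;	/* must stay the first member (see cast below) */
	int id;
	bool fence_signaled;	/* stand-in for fence_is_signaled(active) */
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

/* insert n right after pos */
static void list_add(struct node *n, struct node *pos)
{
	n->prev = pos;
	n->next = pos->next;
	pos->next->prev = n;
	pos->next = n;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* like the kernel's list_move(): unlink n and re-insert it after pos */
static void list_move(struct node *n, struct node *pos)
{
	list_del(n);
	list_add(n, pos);
}

int main(void)
{
	struct vmid ids[4] = {
		{ .id = 0, .fence_signaled = false },
		{ .id = 1, .fence_signaled = true  },
		{ .id = 2, .fence_signaled = false },
		{ .id = 3, .fence_signaled = true  },
	};
	struct node lru;

	list_init(&lru);
	for (int i = 3; i >= 0; i--)
		list_add(&ids[i].list, &lru);	/* LRU order: 0, 1, 2, 3 */

	/*
	 * Same shape as the hunk: "head" trails the last entry we moved, so
	 * already-signaled (idle) IDs end up at the front in their original
	 * relative order, ahead of IDs whose fences are still in flight.
	 */
	struct node *head = &lru;
	for (struct node *n = lru.next, *tmp = n->next; n != &lru;
	     n = tmp, tmp = n->next) {
		struct vmid *v = (struct vmid *)n;	/* list is first member */

		if (v->fence_signaled) {
			list_move(n, head);
			head = n;
		}
	}

	/* grab the first entry; it is idle if any idle ID exists: prints 1 */
	printf("grab id %d\n", ((struct vmid *)lru.next)->id);
	return 0;
}

The trailing head pointer is the detail worth noting: each signaled ID is inserted right after the previously moved one rather than always at the list head, which is why the moved entries keep their relative LRU order.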