|
@@ -251,6 +251,19 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		}
 	}
 
+	spin_lock(&glob->lru_lock);
+	list_for_each_entry(bo_base, &vm->idle, vm_status) {
+		struct amdgpu_bo *bo = bo_base->bo;
+
+		if (!bo->parent)
+			continue;
+
+		ttm_bo_move_to_lru_tail(&bo->tbo);
+		if (bo->shadow)
+			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+	}
+	spin_unlock(&glob->lru_lock);
+
 	return r;
 }
 
|
@@ -965,7 +978,7 @@ restart:
 					 struct amdgpu_vm_bo_base,
 					 vm_status);
 		bo_base->moved = false;
-		list_del_init(&bo_base->vm_status);
+		list_move(&bo_base->vm_status, &vm->idle);
 
 		bo = bo_base->bo->parent;
 		if (!bo)
@@ -1571,10 +1584,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	 * the evicted list so that it gets validated again on the
 	 * next command submission.
 	 */
-	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
-	    !(bo->preferred_domains &
-	      amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
-		list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+		uint32_t mem_type = bo->tbo.mem.mem_type;
+
+		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+		else
+			list_add(&bo_va->base.vm_status, &vm->idle);
+	}
 
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	bo_va->cleared = clear;
@@ -2368,6 +2385,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	INIT_LIST_HEAD(&vm->relocated);
 	spin_lock_init(&vm->moved_lock);
 	INIT_LIST_HEAD(&vm->moved);
+	INIT_LIST_HEAD(&vm->idle);
 	INIT_LIST_HEAD(&vm->freed);
 
 	/* create scheduler entity for page table updates */