@@ -1117,28 +1117,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @bo_va: requested BO and VM object
- * @mem: ttm mem
+ * @clear: if true clear the entries
  *
  * Fill in the page table entries for @bo_va.
  * Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem)
+			bool clear)
 {
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
 	uint32_t gtt_flags, flags;
+	struct ttm_mem_reg *mem;
 	struct fence *exclusive;
 	uint64_t addr;
 	int r;
 
-	if (mem) {
+	if (clear) {
+		mem = NULL;
+		addr = 0;
+		exclusive = NULL;
+	} else {
 		struct ttm_dma_tt *ttm;
 
+		mem = &bo_va->bo->tbo.mem;
 		addr = (u64)mem->start << PAGE_SHIFT;
 		switch (mem->mem_type) {
 		case TTM_PL_TT:
@@ -1156,9 +1160,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		}
 
 		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
-	} else {
-		addr = 0;
-		exclusive = NULL;
 	}
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
@@ -1189,7 +1190,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	spin_lock(&vm->status_lock);
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	list_del_init(&bo_va->vm_status);
-	if (!mem)
+	if (clear)
 		list_add(&bo_va->vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
@@ -1252,7 +1253,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 				      struct amdgpu_bo_va, vm_status);
 		spin_unlock(&vm->status_lock);
 
-		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, true);
 		if (r)
 			return r;
 
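
For reference, a sketch (not part of the patch) of what the signature change
means at a call site; the "before" calls are reconstructed from the old
signature and may not match every caller verbatim:

	/* Before: the caller picked the placement (or passed NULL to clear). */
	r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
	r = amdgpu_vm_bo_update(adev, bo_va, NULL);

	/* After: the caller only says whether to clear; when clear is false
	 * the function reads bo_va->bo->tbo.mem itself, so it always sees
	 * the BO's current placement.
	 */
	r = amdgpu_vm_bo_update(adev, bo_va, false);	/* map/update entries */
	r = amdgpu_vm_bo_update(adev, bo_va, true);	/* clear entries */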