|
@@ -98,40 +98,27 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * amdgpu_vm_get_bos - add the vm BOs to a validation list
|
|
|
+ * amdgpu_vm_get_pt_bos - add the vm BOs to a duplicates list
|
|
|
*
|
|
|
* @vm: vm providing the BOs
|
|
|
* @duplicates: head of duplicates list
|
|
|
*
|
|
|
- * Add the page directory to the list of BOs to
|
|
|
- * validate for command submission (cayman+).
|
|
|
+ * Add the page tables to the BO duplicates list
|
|
|
+ * for command submission.
|
|
|
*/
|
|
|
-struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm,
|
|
|
- struct list_head *duplicates)
|
|
|
+void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
|
|
|
{
|
|
|
- struct amdgpu_bo_list_entry *list;
|
|
|
- unsigned i, idx;
|
|
|
-
|
|
|
- list = drm_malloc_ab(vm->max_pde_used + 1,
|
|
|
- sizeof(struct amdgpu_bo_list_entry));
|
|
|
- if (!list)
|
|
|
- return NULL;
|
|
|
+ unsigned i;
|
|
|
|
|
|
/* add the vm page table to the list */
|
|
|
- for (i = 0, idx = 0; i <= vm->max_pde_used; i++) {
|
|
|
- if (!vm->page_tables[i].bo)
|
|
|
+ for (i = 0; i <= vm->max_pde_used; ++i) {
|
|
|
+ struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
|
|
|
+
|
|
|
+ if (!entry->robj)
|
|
|
continue;
|
|
|
|
|
|
- list[idx].robj = vm->page_tables[i].bo;
|
|
|
- list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
|
|
|
- list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
|
|
|
- list[idx].priority = 0;
|
|
|
- list[idx].tv.bo = &list[idx].robj->tbo;
|
|
|
- list[idx].tv.shared = true;
|
|
|
- list_add(&list[idx++].tv.head, duplicates);
|
|
|
+ list_add(&entry->tv.head, duplicates);
|
|
|
}
|
|
|
-
|
|
|
- return list;
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -474,7 +461,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
|
|
|
|
|
|
/* walk over the address space and update the page directory */
|
|
|
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
|
|
|
- struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
|
|
|
+ struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
|
|
|
uint64_t pde, pt;
|
|
|
|
|
|
if (bo == NULL)
|
|
@@ -651,7 +638,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
|
|
|
/* walk over the address space and update the page tables */
|
|
|
for (addr = start; addr < end; ) {
|
|
|
uint64_t pt_idx = addr >> amdgpu_vm_block_size;
|
|
|
- struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
|
|
|
+ struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
|
|
|
unsigned nptes;
|
|
|
uint64_t pte;
|
|
|
int r;
|
|
@@ -1083,9 +1070,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
|
|
/* walk over the address space and allocate the page tables */
|
|
|
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
|
|
|
struct reservation_object *resv = vm->page_directory->tbo.resv;
|
|
|
+ struct amdgpu_bo_list_entry *entry;
|
|
|
struct amdgpu_bo *pt;
|
|
|
|
|
|
- if (vm->page_tables[pt_idx].bo)
|
|
|
+ entry = &vm->page_tables[pt_idx].entry;
|
|
|
+ if (entry->robj)
|
|
|
continue;
|
|
|
|
|
|
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
|
|
@@ -1102,8 +1091,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
|
|
goto error_free;
|
|
|
}
|
|
|
|
|
|
+ entry->robj = pt;
|
|
|
+ entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
|
|
|
+ entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
|
|
|
+ entry->priority = 0;
|
|
|
+ entry->tv.bo = &entry->robj->tbo;
|
|
|
+ entry->tv.shared = true;
|
|
|
vm->page_tables[pt_idx].addr = 0;
|
|
|
- vm->page_tables[pt_idx].bo = pt;
|
|
|
}
|
|
|
|
|
|
return 0;
|
|
@@ -1334,7 +1328,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
|
|
}
|
|
|
|
|
|
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
|
|
|
- amdgpu_bo_unref(&vm->page_tables[i].bo);
|
|
|
+ amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
|
|
|
kfree(vm->page_tables);
|
|
|
|
|
|
amdgpu_bo_unref(&vm->page_directory);
|