@@ -946,54 +946,38 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 					unsigned nptes, uint64_t dst,
 					uint64_t flags)
 {
-	bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
 	uint64_t pd_addr, pde;
 
 	/* In the case of a mixed PT the PDE must point to it*/
-	if (p->adev->asic_type < CHIP_VEGA10 || p->src ||
-	    nptes != AMDGPU_VM_PTE_COUNT(p->adev)) {
-		dst = amdgpu_bo_gpu_offset(entry->base.bo);
-		flags = AMDGPU_PTE_VALID;
-	} else {
+	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
+	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
 		/* Set the huge page flag to stop scanning at this PDE */
 		flags |= AMDGPU_PDE_PTE;
 	}
 
-	if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
+	if (!(flags & AMDGPU_PDE_PTE)) {
+		if (entry->huge) {
+			/* Add the entry to the relocated list to update it. */
+			entry->huge = false;
+			spin_lock(&p->vm->status_lock);
+			list_move(&entry->base.vm_status, &p->vm->relocated);
+			spin_unlock(&p->vm->status_lock);
+		}
 		return;
-	entry->huge = !!(flags & AMDGPU_PDE_PTE);
+	}
 
+	entry->huge = true;
 	amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
 			       &dst, &flags);
 
-	if (use_cpu_update) {
-		/* In case a huge page is replaced with a system
-		 * memory mapping, p->pages_addr != NULL and
-		 * amdgpu_vm_cpu_set_ptes would try to translate dst
-		 * through amdgpu_vm_map_gart. But dst is already a
-		 * GPU address (of the page table). Disable
-		 * amdgpu_vm_map_gart temporarily.
-		 */
-		dma_addr_t *tmp;
-
-		tmp = p->pages_addr;
-		p->pages_addr = NULL;
-
-		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
-		pde = pd_addr + (entry - parent->entries) * 8;
-		amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
-
-		p->pages_addr = tmp;
-	} else {
-		if (parent->base.bo->shadow) {
-			pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
-			pde = pd_addr + (entry - parent->entries) * 8;
-			amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
-		}
-		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+	if (parent->base.bo->shadow) {
+		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
 		pde = pd_addr + (entry - parent->entries) * 8;
-		amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+		p->func(p, pde, dst, 1, 0, flags);
 	}
+	pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+	pde = pd_addr + (entry - parent->entries) * 8;
+	p->func(p, pde, dst, 1, 0, flags);
 }
 
 /**
@@ -1205,12 +1189,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	/* padding, etc. */
 	ndw = 64;
 
-	/* one PDE write for each huge page */
-	if (vm->root.base.bo->shadow)
-		ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6 * 2;
-	else
-		ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
-
 	if (pages_addr) {
 		/* copy commands needed */
 		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
@@ -1285,8 +1263,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 error_free:
 	amdgpu_job_free(job);
-	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
-				   adev->vm_manager.root_level);
 	return r;
 }
 