@@ -159,11 +159,20 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
  */
 static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
                                     int (*validate)(void *, struct amdgpu_bo *),
-                                    void *param, bool use_cpu_for_update)
+                                    void *param, bool use_cpu_for_update,
+                                    struct ttm_bo_global *glob)
 {
         unsigned i;
         int r;
 
+        if (parent->bo->shadow) {
+                struct amdgpu_bo *shadow = parent->bo->shadow;
+
+                r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+                if (r)
+                        return r;
+        }
+
         if (use_cpu_for_update) {
                 r = amdgpu_bo_kmap(parent->bo, NULL);
                 if (r)
@@ -183,12 +192,18 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
                 if (r)
                         return r;
 
+                spin_lock(&glob->lru_lock);
+                ttm_bo_move_to_lru_tail(&entry->bo->tbo);
+                if (entry->bo->shadow)
+                        ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
+                spin_unlock(&glob->lru_lock);
+
                 /*
                  * Recurse into the sub directory. This is harmless because we
                  * have only a maximum of 5 layers.
                  */
                 r = amdgpu_vm_validate_level(entry, validate, param,
-                                             use_cpu_for_update);
+                                             use_cpu_for_update, glob);
                 if (r)
                         return r;
         }
@@ -220,54 +235,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                 return 0;
 
         return amdgpu_vm_validate_level(&vm->root, validate, param,
-                                        vm->use_cpu_for_update);
+                                        vm->use_cpu_for_update,
+                                        adev->mman.bdev.glob);
 }
 
 /**
- * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
- *
- * Move the PT BOs to the tail of the LRU.
- */
-static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
-{
-        unsigned i;
-
-        if (!parent->entries)
-                return;
-
-        for (i = 0; i <= parent->last_entry_used; ++i) {
-                struct amdgpu_vm_pt *entry = &parent->entries[i];
-
-                if (!entry->bo)
-                        continue;
-
-                ttm_bo_move_to_lru_tail(&entry->bo->tbo);
-                amdgpu_vm_move_level_in_lru(entry);
-        }
-}
-
-/**
- * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
- *
- * Move the PT BOs to the tail of the LRU.
- */
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
-                                  struct amdgpu_vm *vm)
-{
-        struct ttm_bo_global *glob = adev->mman.bdev.glob;
-
-        spin_lock(&glob->lru_lock);
-        amdgpu_vm_move_level_in_lru(&vm->root);
-        spin_unlock(&glob->lru_lock);
-}
-
-/**
  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
  *
  * @adev: amdgpu_device pointer
@@ -359,7 +331,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 
                 entry->bo = pt;
                 entry->addr = 0;
-                entry->huge_page = false;
         }
 
         if (level < adev->vm_manager.num_level) {
@@ -899,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 {
         struct amdgpu_bo_va *bo_va;
 
-        list_for_each_entry(bo_va, &bo->va, bo_list) {
-                if (bo_va->vm == vm) {
+        list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+                if (bo_va->base.vm == vm) {
                         return bo_va;
                 }
         }
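The base.* accesses above and throughout the rest of this patch come from a new struct amdgpu_vm_bo_base embedded at the start of struct amdgpu_bo_va, collecting the state every per-VM BO needs. A minimal sketch of that relationship, inferred from the accesses in this diff (the real definition lives in amdgpu_vm.h and may carry more members):

struct amdgpu_vm_bo_base {
        /* constant after initialization */
        struct amdgpu_vm        *vm;
        struct amdgpu_bo        *bo;

        /* protected by bo being reserved; entry on bo->va */
        struct list_head        bo_list;

        /* protected by vm->status_lock; entry on e.g. vm->moved */
        struct list_head        vm_status;
};

struct amdgpu_bo_va {
        /* common state shared with other per-VM BO users */
        struct amdgpu_vm_bo_base        base;

        unsigned                        ref_count;
        struct list_head                valids;
        struct list_head                invalids;
        /* ... */
};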
@@ -1074,11 +1045,6 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 
                 params.func = amdgpu_vm_cpu_set_ptes;
         } else {
-                if (shadow) {
-                        r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
-                        if (r)
-                                return r;
-                }
                 ring = container_of(vm->entity.sched, struct amdgpu_ring,
                                     sched);
 
@@ -1114,22 +1080,14 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
                 if (bo == NULL)
                         continue;
 
-                if (bo->shadow) {
-                        struct amdgpu_bo *pt_shadow = bo->shadow;
-
-                        r = amdgpu_ttm_bind(&pt_shadow->tbo,
-                                            &pt_shadow->tbo.mem);
-                        if (r)
-                                return r;
-                }
-
                 pt = amdgpu_bo_gpu_offset(bo);
                 pt = amdgpu_gart_get_vm_pde(adev, pt);
-                if (parent->entries[pt_idx].addr == pt ||
-                    parent->entries[pt_idx].huge_page)
+                /* Don't update huge pages here */
+                if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
+                    parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
                         continue;
 
-                parent->entries[pt_idx].addr = pt;
+                parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
 
                 pde = pd_addr + pt_idx * 8;
                 if (((last_pde + 8 * count) != pde) ||
@@ -1307,15 +1265,14 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
  *
  * Check if we can update the PD with a huge page.
  */
-static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
-                                       struct amdgpu_vm_pt *entry,
-                                       struct amdgpu_vm_pt *parent,
-                                       unsigned nptes, uint64_t dst,
-                                       uint64_t flags)
+static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
+                                        struct amdgpu_vm_pt *entry,
+                                        struct amdgpu_vm_pt *parent,
+                                        unsigned nptes, uint64_t dst,
+                                        uint64_t flags)
 {
         bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
         uint64_t pd_addr, pde;
-        int r;
 
         /* In the case of a mixed PT the PDE must point to it*/
         if (p->adev->asic_type < CHIP_VEGA10 ||
@@ -1327,21 +1284,17 @@ static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
                 dst = amdgpu_gart_get_vm_pde(p->adev, dst);
                 flags = AMDGPU_PTE_VALID;
         } else {
+                /* Set the huge page flag to stop scanning at this PDE */
                 flags |= AMDGPU_PDE_PTE;
         }
 
-        if (entry->addr == dst &&
-            entry->huge_page == !!(flags & AMDGPU_PDE_PTE))
-                return 0;
+        if (entry->addr == (dst | flags))
+                return;
 
-        entry->addr = dst;
-        entry->huge_page = !!(flags & AMDGPU_PDE_PTE);
+        entry->addr = (dst | flags);
 
         if (use_cpu_update) {
-                r = amdgpu_bo_kmap(parent->bo, (void *)&pd_addr);
-                if (r)
-                        return r;
-
+                pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
                 pde = pd_addr + (entry - parent->entries) * 8;
                 amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
         } else {
@@ -1354,8 +1307,6 @@ static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
                 pde = pd_addr + (entry - parent->entries) * 8;
                 amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
         }
-
-        return 0;
 }
 
 /**
@@ -1382,7 +1333,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
         struct amdgpu_bo *pt;
         unsigned nptes;
         bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
-        int r;
 
         /* walk over the address space and update the page tables */
         for (addr = start; addr < end; addr += nptes,
@@ -1398,12 +1348,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                 else
                         nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
 
-                r = amdgpu_vm_handle_huge_pages(params, entry, parent,
-                                                nptes, dst, flags);
-                if (r)
-                        return r;
-
-                if (entry->huge_page)
+                amdgpu_vm_handle_huge_pages(params, entry, parent,
+                                            nptes, dst, flags);
+                /* We don't need to update PTEs for huge pages */
+                if (entry->addr & AMDGPU_PDE_PTE)
                         continue;
 
                 pt = entry->bo;
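The hunks above drop the separate huge_page bool and instead fold the state flags into entry->addr itself, so a single 64-bit value carries both the destination address and how the PDE was written. A hedged illustration of that encoding, assuming the usual amdgpu_vm.h bit definitions (AMDGPU_PTE_VALID = 1ULL << 0, AMDGPU_PDE_PTE = 1ULL << 54); the helper names are hypothetical:

/* Sketch of the packed entry->addr encoding used above. */
static inline bool amdgpu_vm_entry_is_huge(uint64_t addr)
{
        /* the PDE doubles as a huge-page PTE, no PT below it to update */
        return addr & AMDGPU_PDE_PTE;
}

static inline bool amdgpu_vm_entry_up_to_date(uint64_t addr, uint64_t pt)
{
        /* regular PD entry already pointing at this page table */
        return addr == (pt | AMDGPU_PTE_VALID);
}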
@@ -1462,9 +1410,7 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
          * Userspace can support this by aligning virtual base address and
          * allocation size to the fragment size.
          */
-
-        /* SI and newer are optimized for 64KB */
-        unsigned pages_per_frag = AMDGPU_LOG2_PAGES_PER_FRAG(params->adev);
+        unsigned pages_per_frag = params->adev->vm_manager.fragment_size;
         uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
         uint64_t frag_align = 1 << pages_per_frag;
 
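fragment_size is a log2 count of GPU pages, so the arithmetic above works out as in this worked example (values illustrative, assuming 4KB GPU pages):

/* With vm_manager.fragment_size == 9: */
unsigned pages_per_frag = 9;                    /* log2(pages per fragment) */
uint64_t frag_align = 1ULL << pages_per_frag;   /* 512-page alignment */
uint64_t frag_bytes = frag_align * 4096;        /* 2MB per fragment */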
@@ -1778,7 +1724,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                         struct amdgpu_bo_va *bo_va,
                         bool clear)
 {
-        struct amdgpu_vm *vm = bo_va->vm;
+        struct amdgpu_bo *bo = bo_va->base.bo;
+        struct amdgpu_vm *vm = bo_va->base.vm;
         struct amdgpu_bo_va_mapping *mapping;
         dma_addr_t *pages_addr = NULL;
         uint64_t gtt_flags, flags;
@@ -1787,27 +1734,27 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
         struct dma_fence *exclusive;
         int r;
 
-        if (clear || !bo_va->bo) {
+        if (clear || !bo_va->base.bo) {
                 mem = NULL;
                 nodes = NULL;
                 exclusive = NULL;
         } else {
                 struct ttm_dma_tt *ttm;
 
-                mem = &bo_va->bo->tbo.mem;
+                mem = &bo_va->base.bo->tbo.mem;
                 nodes = mem->mm_node;
                 if (mem->mem_type == TTM_PL_TT) {
-                        ttm = container_of(bo_va->bo->tbo.ttm, struct
-                                           ttm_dma_tt, ttm);
+                        ttm = container_of(bo_va->base.bo->tbo.ttm,
+                                           struct ttm_dma_tt, ttm);
                         pages_addr = ttm->dma_address;
                 }
-                exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+                exclusive = reservation_object_get_excl(bo->tbo.resv);
         }
 
-        if (bo_va->bo) {
-                flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-                gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-                             adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
+        if (bo) {
+                flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+                gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
+                             adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
                         flags : 0;
         } else {
                 flags = 0x0;
@@ -1815,7 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
         }
 
         spin_lock(&vm->status_lock);
-        if (!list_empty(&bo_va->vm_status))
+        if (!list_empty(&bo_va->base.vm_status))
                 list_splice_init(&bo_va->valids, &bo_va->invalids);
         spin_unlock(&vm->status_lock);
 
@@ -1838,9 +1785,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
         spin_lock(&vm->status_lock);
         list_splice_init(&bo_va->invalids, &bo_va->valids);
-        list_del_init(&bo_va->vm_status);
+        list_del_init(&bo_va->base.vm_status);
         if (clear)
-                list_add(&bo_va->vm_status, &vm->cleared);
+                list_add(&bo_va->base.vm_status, &vm->cleared);
         spin_unlock(&vm->status_lock);
 
         if (vm->use_cpu_for_update) {
@@ -2034,26 +1981,26 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
+ * amdgpu_vm_clear_moved - clear moved BOs in the PT
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  *
- * Make sure all invalidated BOs are cleared in the PT.
+ * Make sure all moved BOs are cleared in the PT.
  * Returns 0 for success.
  *
 * PTs have to be reserved and mutex must be locked!
 */
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
-                             struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                          struct amdgpu_sync *sync)
 {
         struct amdgpu_bo_va *bo_va = NULL;
         int r = 0;
 
         spin_lock(&vm->status_lock);
-        while (!list_empty(&vm->invalidated)) {
-                bo_va = list_first_entry(&vm->invalidated,
-                        struct amdgpu_bo_va, vm_status);
+        while (!list_empty(&vm->moved)) {
+                bo_va = list_first_entry(&vm->moved,
+                        struct amdgpu_bo_va, base.vm_status);
                 spin_unlock(&vm->status_lock);
 
                 r = amdgpu_vm_bo_update(adev, bo_va, true);
@@ -2093,16 +2040,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
         if (bo_va == NULL) {
                 return NULL;
         }
-        bo_va->vm = vm;
-        bo_va->bo = bo;
+        bo_va->base.vm = vm;
+        bo_va->base.bo = bo;
+        INIT_LIST_HEAD(&bo_va->base.bo_list);
+        INIT_LIST_HEAD(&bo_va->base.vm_status);
+
         bo_va->ref_count = 1;
-        INIT_LIST_HEAD(&bo_va->bo_list);
         INIT_LIST_HEAD(&bo_va->valids);
         INIT_LIST_HEAD(&bo_va->invalids);
-        INIT_LIST_HEAD(&bo_va->vm_status);
 
         if (bo)
-                list_add_tail(&bo_va->bo_list, &bo->va);
+                list_add_tail(&bo_va->base.bo_list, &bo->va);
 
         return bo_va;
 }
@@ -2127,7 +2075,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                      uint64_t size, uint64_t flags)
 {
         struct amdgpu_bo_va_mapping *mapping, *tmp;
-        struct amdgpu_vm *vm = bo_va->vm;
+        struct amdgpu_bo *bo = bo_va->base.bo;
+        struct amdgpu_vm *vm = bo_va->base.vm;
         uint64_t eaddr;
 
         /* validate the parameters */
@@ -2138,7 +2087,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
         /* make sure object fit at this offset */
         eaddr = saddr + size - 1;
         if (saddr >= eaddr ||
-            (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+            (bo && offset + size > amdgpu_bo_size(bo)))
                 return -EINVAL;
 
         saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2148,7 +2097,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
         if (tmp) {
                 /* bo and tmp overlap, invalid addr */
                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
-                        "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+                        "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
                         tmp->start, tmp->last + 1);
                 return -EINVAL;
         }
@@ -2193,7 +2142,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
                              uint64_t size, uint64_t flags)
 {
         struct amdgpu_bo_va_mapping *mapping;
-        struct amdgpu_vm *vm = bo_va->vm;
+        struct amdgpu_bo *bo = bo_va->base.bo;
+        struct amdgpu_vm *vm = bo_va->base.vm;
         uint64_t eaddr;
         int r;
 
@@ -2205,7 +2155,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
         /* make sure object fit at this offset */
         eaddr = saddr + size - 1;
         if (saddr >= eaddr ||
-            (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+            (bo && offset + size > amdgpu_bo_size(bo)))
                 return -EINVAL;
 
         /* Allocate all the needed memory */
@@ -2213,7 +2163,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
         if (!mapping)
                 return -ENOMEM;
 
-        r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+        r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
         if (r) {
                 kfree(mapping);
                 return r;
@@ -2253,7 +2203,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                        uint64_t saddr)
 {
         struct amdgpu_bo_va_mapping *mapping;
-        struct amdgpu_vm *vm = bo_va->vm;
+        struct amdgpu_vm *vm = bo_va->base.vm;
         bool valid = true;
 
         saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2401,12 +2351,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va)
 {
         struct amdgpu_bo_va_mapping *mapping, *next;
-        struct amdgpu_vm *vm = bo_va->vm;
+        struct amdgpu_vm *vm = bo_va->base.vm;
 
-        list_del(&bo_va->bo_list);
+        list_del(&bo_va->base.bo_list);
 
         spin_lock(&vm->status_lock);
-        list_del(&bo_va->vm_status);
+        list_del(&bo_va->base.vm_status);
         spin_unlock(&vm->status_lock);
 
         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
@@ -2438,13 +2388,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                              struct amdgpu_bo *bo)
 {
-        struct amdgpu_bo_va *bo_va;
+        struct amdgpu_vm_bo_base *bo_base;
 
-        list_for_each_entry(bo_va, &bo->va, bo_list) {
-                spin_lock(&bo_va->vm->status_lock);
-                if (list_empty(&bo_va->vm_status))
-                        list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-                spin_unlock(&bo_va->vm->status_lock);
+        list_for_each_entry(bo_base, &bo->va, bo_list) {
+                spin_lock(&bo_base->vm->status_lock);
+                if (list_empty(&bo_base->vm_status))
+                        list_add(&bo_base->vm_status,
+                                 &bo_base->vm->moved);
+                spin_unlock(&bo_base->vm->status_lock);
         }
 }
 
@@ -2462,12 +2413,26 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
 }
 
 /**
- * amdgpu_vm_adjust_size - adjust vm size and block size
+ * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
+ *
+ * @adev: amdgpu_device pointer
+ * @fragment_size_default: the default fragment size if it's set auto
+ */
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
+{
+        if (amdgpu_vm_fragment_size == -1)
+                adev->vm_manager.fragment_size = fragment_size_default;
+        else
+                adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
+}
+
+/**
+ * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
  *
  * @adev: amdgpu_device pointer
  * @vm_size: the default vm size if it's set auto
  */
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
 {
         /* adjust vm size firstly */
         if (amdgpu_vm_size == -1)
@@ -2482,8 +2447,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
         else
                 adev->vm_manager.block_size = amdgpu_vm_block_size;
 
-        DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
-                 adev->vm_manager.vm_size, adev->vm_manager.block_size);
+        amdgpu_vm_set_fragment_size(adev, fragment_size_default);
+
+        DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
+                 adev->vm_manager.vm_size, adev->vm_manager.block_size,
+                 adev->vm_manager.fragment_size);
 }
 
 /**
@@ -2512,7 +2480,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
                 vm->reserved_vmid[i] = NULL;
         spin_lock_init(&vm->status_lock);
-        INIT_LIST_HEAD(&vm->invalidated);
+        INIT_LIST_HEAD(&vm->moved);
         INIT_LIST_HEAD(&vm->cleared);
         INIT_LIST_HEAD(&vm->freed);
 
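With the extra fragment_size_default parameter, every caller of amdgpu_vm_adjust_size now supplies an ASIC default that the amdgpu_vm_fragment_size module parameter overrides when it is not -1. A hypothetical caller, not part of this diff (function name and values are stand-ins):

static int gmc_vX_0_sw_init(struct amdgpu_device *adev)
{
        /* 64GB address space, 2^9 pages (2MB) default fragment size;
         * both values are illustrative, real per-ASIC defaults may differ.
         */
        amdgpu_vm_adjust_size(adev, 64, 9);
        return 0;
}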