@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	unsigned i;
 
 	/* check if the id is still valid */
-	if (vm_id->id && vm_id->last_id_use &&
-	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
-		trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
-		return 0;
+	if (vm_id->id) {
+		unsigned id = vm_id->id;
+		long owner;
+
+		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+		if (owner == (long)vm) {
+			trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
+			return 0;
+		}
 	}
 
 	/* we definately need to flush */
@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 	/* skip over VMID 0, since it is the system VM */
 	for (i = 1; i < adev->vm_manager.nvm; ++i) {
-		struct fence *fence = adev->vm_manager.active[i];
+		struct fence *fence = adev->vm_manager.ids[i].active;
 		struct amdgpu_ring *fring;
 
 		if (fence == NULL) {
@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (choices[i]) {
 			struct fence *fence;
 
-			fence = adev->vm_manager.active[choices[i]];
+			fence = adev->vm_manager.ids[choices[i]].active;
 			vm_id->id = choices[i];
 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
 
@@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
 	struct fence *flushed_updates = vm_id->flushed_updates;
-	bool is_earlier = false;
-
-	if (flushed_updates && updates) {
-		BUG_ON(flushed_updates->context != updates->context);
-		is_earlier = (updates->seqno - flushed_updates->seqno <=
-			      INT_MAX) ? true : false;
-	}
+	bool is_later;
 
-	if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
-	    is_earlier) {
+	if (!flushed_updates)
+		is_later = true;
+	else if (!updates)
+		is_later = false;
+	else
+		is_later = fence_is_later(updates, flushed_updates);
 
+	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		if (is_earlier) {
+		if (is_later) {
 			vm_id->flushed_updates = fence_get(updates);
 			fence_put(flushed_updates);
 		}
-		if (!flushed_updates)
-			vm_id->flushed_updates = fence_get(updates);
 		vm_id->pd_gpu_addr = pd_addr;
 		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
 	}
@@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
  */
 void amdgpu_vm_fence(struct amdgpu_device *adev,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *fence)
+		     struct fence *fence)
 {
-	unsigned ridx = fence->ring->idx;
-	unsigned vm_id = vm->ids[ridx].id;
-
-	fence_put(adev->vm_manager.active[vm_id]);
-	adev->vm_manager.active[vm_id] = fence_get(&fence->base);
+	struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
+	unsigned vm_id = vm->ids[ring->idx].id;
 
-	fence_put(vm->ids[ridx].last_id_use);
-	vm->ids[ridx].last_id_use = fence_get(&fence->base);
+	fence_put(adev->vm_manager.ids[vm_id].active);
+	adev->vm_manager.ids[vm_id].active = fence_get(fence);
+	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
 }
 
 /**
@@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
  *
  * @adev: amdgpu_device pointer
  * @bo: bo to clear
+ *
+ * need to reserve bo first before calling it.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_bo *bo)
@@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	uint64_t addr;
 	int r;
 
-	r = amdgpu_bo_reserve(bo, false);
-	if (r)
-		return r;
-
 	r = reservation_object_reserve_shared(bo->tbo.resv);
 	if (r)
 		return r;
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
 	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
 	if (!ib)
-		goto error_unreserve;
+		goto error;
 
 	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
 	if (r)
@@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (!r)
 		amdgpu_bo_fence(bo, fence, true);
 	fence_put(fence);
-	if (amdgpu_enable_scheduler) {
-		amdgpu_bo_unreserve(bo);
+	if (amdgpu_enable_scheduler)
 		return 0;
-	}
+
 error_free:
 	amdgpu_ib_free(adev, ib);
 	kfree(ib);
 
-error_unreserve:
-	amdgpu_bo_unreserve(bo);
+error:
 	return r;
 }
 
@@ -989,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
  * Add a mapping of the BO at the specefied addr into the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		     struct amdgpu_bo_va *bo_va,
@@ -1005,30 +1001,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 	/* validate the parameters */
 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-	    size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
-		amdgpu_bo_unreserve(bo_va->bo);
+	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
 		return -EINVAL;
-	}
 
 	/* make sure object fit at this offset */
 	eaddr = saddr + size;
-	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
-		amdgpu_bo_unreserve(bo_va->bo);
+	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
 		return -EINVAL;
-	}
 
 	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
 	if (last_pfn > adev->vm_manager.max_pfn) {
 		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
 			last_pfn, adev->vm_manager.max_pfn);
-		amdgpu_bo_unreserve(bo_va->bo);
 		return -EINVAL;
 	}
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
+	spin_lock(&vm->it_lock);
 	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+	spin_unlock(&vm->it_lock);
 	if (it) {
 		struct amdgpu_bo_va_mapping *tmp;
 		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1036,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
 			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
 			tmp->it.start, tmp->it.last + 1);
-		amdgpu_bo_unreserve(bo_va->bo);
 		r = -EINVAL;
 		goto error;
 	}
 
 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
 	if (!mapping) {
-		amdgpu_bo_unreserve(bo_va->bo);
 		r = -ENOMEM;
 		goto error;
 	}
@@ -1055,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	mapping->flags = flags;
 
 	list_add(&mapping->list, &bo_va->invalids);
+	spin_lock(&vm->it_lock);
 	interval_tree_insert(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
 	/* Make sure the page tables are allocated */
@@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	if (eaddr > vm->max_pde_used)
 		vm->max_pde_used = eaddr;
 
-	amdgpu_bo_unreserve(bo_va->bo);
-
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
 		struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,13 +1068,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		if (vm->page_tables[pt_idx].bo)
 			continue;
 
-		ww_mutex_lock(&resv->lock, NULL);
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 				     NULL, resv, &pt);
-		ww_mutex_unlock(&resv->lock);
 		if (r)
 			goto error_free;
 
@@ -1101,7 +1090,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 error_free:
 	list_del(&mapping->list);
+	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 	kfree(mapping);
 
@@ -1119,7 +1110,7 @@ error:
  * Remove a mapping of the BO at the specefied addr from the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       struct amdgpu_bo_va *bo_va,
@@ -1144,21 +1135,20 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 			break;
 		}
 
-		if (&mapping->list == &bo_va->invalids) {
-			amdgpu_bo_unreserve(bo_va->bo);
+		if (&mapping->list == &bo_va->invalids)
 			return -ENOENT;
-		}
 	}
 
 	list_del(&mapping->list);
+	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
 	if (valid)
 		list_add(&mapping->list, &vm->freed);
 	else
 		kfree(mapping);
-	amdgpu_bo_unreserve(bo_va->bo);
 
 	return 0;
 }
@@ -1187,13 +1177,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
+		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
+		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 		list_add(&mapping->list, &vm->freed);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
+		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
+		spin_unlock(&vm->it_lock);
 		kfree(mapping);
 	}
 
@@ -1241,7 +1235,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		vm->ids[i].id = 0;
 		vm->ids[i].flushed_updates = NULL;
-		vm->ids[i].last_id_use = NULL;
 	}
 	mutex_init(&vm->mutex);
 	vm->va = RB_ROOT;
@@ -1249,7 +1242,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
-
+	spin_lock_init(&vm->it_lock);
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1269,8 +1262,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 			     NULL, NULL, &vm->page_directory);
 	if (r)
 		return r;
-
+	r = amdgpu_bo_reserve(vm->page_directory, false);
+	if (r) {
+		amdgpu_bo_unref(&vm->page_directory);
+		vm->page_directory = NULL;
+		return r;
+	}
 	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+	amdgpu_bo_unreserve(vm->page_directory);
 	if (r) {
 		amdgpu_bo_unref(&vm->page_directory);
 		vm->page_directory = NULL;
@@ -1313,11 +1312,28 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
-
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		unsigned id = vm->ids[i].id;
+
+		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
+				    (long)vm, 0);
 		fence_put(vm->ids[i].flushed_updates);
-		fence_put(vm->ids[i].last_id_use);
 	}
 
 	mutex_destroy(&vm->mutex);
 }
+
+/**
+ * amdgpu_vm_manager_fini - cleanup VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Cleanup the VM manager and free resources.
+ */
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	for (i = 0; i < AMDGPU_NUM_VM; ++i)
+		fence_put(adev->vm_manager.ids[i].active);
+}