@@ -406,6 +406,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
+	/* Temporarily use only the first VM manager */
+	unsigned vmhub = 0; /*ring->funcs->vmhub;*/
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	struct dma_fence *updates = sync->last_vm_update;
 	struct amdgpu_vm_id *id, *idle;
@@ -413,16 +416,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	unsigned i;
 	int r = 0;
 
-	fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
-			       GFP_KERNEL);
+	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
 	if (!fences)
 		return -ENOMEM;
 
-	mutex_lock(&adev->vm_manager.lock);
+	mutex_lock(&id_mgr->lock);
 
 	/* Check if we have an idle VMID */
 	i = 0;
-	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
 		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
 		if (!fences[i])
 			break;
@@ -430,7 +432,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 
 	/* If we can't find a idle VMID to use, wait till one becomes available */
-	if (&idle->list == &adev->vm_manager.ids_lru) {
+	if (&idle->list == &id_mgr->ids_lru) {
 		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
 		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
 		struct dma_fence_array *array;
@@ -455,7 +457,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (r)
 			goto error;
 
-		mutex_unlock(&adev->vm_manager.lock);
+		mutex_unlock(&id_mgr->lock);
 		return 0;
 
 	}
@@ -463,7 +465,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 	job->vm_needs_flush = true;
 	/* Check if we can use a VMID already assigned to this VM */
-	list_for_each_entry_reverse(id, &adev->vm_manager.ids_lru, list) {
+	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
 		struct dma_fence *flushed;
 
 		/* Check all the prerequisites to using this VMID */
@@ -495,13 +497,13 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (r)
 			goto error;
 
-		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+		list_move_tail(&id->list, &id_mgr->ids_lru);
 
-		job->vm_id = id - adev->vm_manager.ids;
+		job->vm_id = id - id_mgr->ids;
 		job->vm_needs_flush = false;
 		trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
-		mutex_unlock(&adev->vm_manager.lock);
+		mutex_unlock(&id_mgr->lock);
 		return 0;
 
 	};
@@ -522,14 +524,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 	id->pd_gpu_addr = job->vm_pd_addr;
 	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
-	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+	list_move_tail(&id->list, &id_mgr->ids_lru);
 	atomic64_set(&id->owner, vm->client_id);
 
-	job->vm_id = id - adev->vm_manager.ids;
+	job->vm_id = id - id_mgr->ids;
 	trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
 error:
-	mutex_unlock(&adev->vm_manager.lock);
+	mutex_unlock(&id_mgr->lock);
 	return r;
 }
 
@@ -581,7 +583,9 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
+	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
 		id->gds_base != job->gds_base ||
 		id->gds_size != job->gds_size ||
@@ -619,10 +623,10 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		if (r)
 			return r;
 
-		mutex_lock(&adev->vm_manager.lock);
+		mutex_lock(&id_mgr->lock);
 		dma_fence_put(id->last_flush);
 		id->last_flush = fence;
-		mutex_unlock(&adev->vm_manager.lock);
+		mutex_unlock(&id_mgr->lock);
 	}
 
 	if (gds_switch_needed) {
@@ -657,9 +661,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
  *
  * Reset saved GDW, GWS and OA to force switch on next flush.
  */
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
+			unsigned vmid)
 {
-	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
 
 	id->gds_base = 0;
 	id->gds_size = 0;
@@ -2230,16 +2236,21 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  */
 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 {
-	unsigned i;
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vm_id_manager *id_mgr =
+			&adev->vm_manager.id_mgr[i];
 
-	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
+		mutex_init(&id_mgr->lock);
+		INIT_LIST_HEAD(&id_mgr->ids_lru);
 
-	/* skip over VMID 0, since it is the system VM */
-	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
-		amdgpu_vm_reset_id(adev, i);
-		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
-		list_add_tail(&adev->vm_manager.ids[i].list,
-			      &adev->vm_manager.ids_lru);
+		/* skip over VMID 0, since it is the system VM */
+		for (j = 1; j < id_mgr->num_ids; ++j) {
+			amdgpu_vm_reset_id(adev, i, j);
+			amdgpu_sync_create(&id_mgr->ids[j].active);
+			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+		}
 	}
 
 	adev->vm_manager.fence_context =
@@ -2247,6 +2258,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		adev->vm_manager.seqno[i] = 0;
 
+
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);
@@ -2262,13 +2274,19 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
  */
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
-	unsigned i;
+	unsigned i, j;
 
-	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
-		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vm_id_manager *id_mgr =
+			&adev->vm_manager.id_mgr[i];
 
-		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
-		dma_fence_put(id->flushed_updates);
-		dma_fence_put(id->last_flush);
+		mutex_destroy(&id_mgr->lock);
+		for (j = 0; j < AMDGPU_NUM_VM; ++j) {
+			struct amdgpu_vm_id *id = &id_mgr->ids[j];
+
+			amdgpu_sync_free(&id->active);
+			dma_fence_put(id->flushed_updates);
+			dma_fence_put(id->last_flush);
+		}
 	}
 }
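
For reference, the per-VMHUB manager that these hunks index through ring->funcs->vmhub is assumed to look roughly like the sketch below. The field names (lock, num_ids, ids_lru, ids[]) are taken directly from the accesses in the diff; the array bound, the hub count and the element layout are illustrative only, and the real definitions live in amdgpu_vm.h.

/* Sketch only: inferred from the id_mgr accesses above, not the exact
 * definition from amdgpu_vm.h.  AMDGPU_NUM_VM and AMDGPU_MAX_VMHUBS are
 * placeholders for the values the driver actually uses.
 */
#define AMDGPU_NUM_VM		16	/* assumed per-hub VMID count */
#define AMDGPU_MAX_VMHUBS	2	/* assumed: GFXHUB and MMHUB  */

struct amdgpu_vm_id_manager {
	struct mutex		lock;		/* serializes grab/flush/reset per hub  */
	unsigned		num_ids;	/* number of VMIDs exposed by this hub   */
	struct list_head	ids_lru;	/* LRU list, head is oldest reuse choice */
	struct amdgpu_vm_id	ids[AMDGPU_NUM_VM];
};

struct amdgpu_vm_manager {
	/* ... */
	struct amdgpu_vm_id_manager	id_mgr[AMDGPU_MAX_VMHUBS];
	/* ... */
};

With a layout like this, job->vm_id remains a plain index into id_mgr->ids (hence the id - id_mgr->ids pointer arithmetic above), while each hub gets its own lock and LRU instead of the former global adev->vm_manager.lock and ids_lru.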