
drm/amdgpu: keep the owner for VMIDs

We don't need the last VM use any more, keep the owner directly.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Author: Christian König
Commit: 1c16c0a7b2

2 changed files with 24 additions and 17 deletions:
  1. drivers/gpu/drm/amd/amdgpu/amdgpu.h     (+5 -3)
  2. drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  (+19 -14)
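
For orientation before the diff: the patch drops the per-ring last_id_use fence and instead tags every VMID with the amdgpu_vm that last used it, stored as a long in an atomic. Below is a minimal, self-contained sketch of that ownership protocol, using C11 atomics in place of the kernel's atomic_long_read/set/cmpxchg; struct vmid_slot and the id_* helpers are illustrative names, not part of the driver.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct vm;                          /* stands in for struct amdgpu_vm */

	struct vmid_slot {                  /* stands in for one vm_manager.ids[] entry */
		atomic_long owner;          /* (long)vm that last used this VMID, 0 = free */
	};

	/* grab path: the cached id is still valid only if we are still the owner */
	static bool id_still_ours(struct vmid_slot *slot, struct vm *vm)
	{
		return atomic_load(&slot->owner) == (long)vm;
	}

	/* fence path: after submitting work with this id, record ourselves as owner */
	static void id_mark_owned(struct vmid_slot *slot, struct vm *vm)
	{
		atomic_store(&slot->owner, (long)vm);
	}

	/* teardown path: clear ownership, but only if nobody else took the id since */
	static void id_release(struct vmid_slot *slot, struct vm *vm)
	{
		long expected = (long)vm;

		atomic_compare_exchange_strong(&slot->owner, &expected, 0);
	}

	int main(void)
	{
		struct vmid_slot slot = { .owner = 0 };
		struct vm *vm = (struct vm *)0x1000;   /* dummy pointer for the demo */

		id_mark_owned(&slot, vm);
		printf("id still ours: %d\n", id_still_ours(&slot, vm));
		id_release(&slot, vm);
		printf("id still ours after release: %d\n", id_still_ours(&slot, vm));
		return 0;
	}

With the owner recorded per VMID, the grab path only compares a pointer-sized value instead of holding and comparing fences, which is what lets the patch drop last_id_use entirely.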

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -925,8 +925,6 @@ struct amdgpu_vm_id {
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
 	struct fence	        *flushed_updates;
-	/* last use of vmid */
-	struct fence		*last_id_use;
 };
 
 struct amdgpu_vm {
@@ -959,7 +957,11 @@ struct amdgpu_vm {
 };
 
 struct amdgpu_vm_manager {
-	struct fence				*active[AMDGPU_NUM_VM];
+	struct {
+		struct fence	*active;
+		atomic_long_t	owner;
+	} ids[AMDGPU_NUM_VM];
+
 	uint32_t				max_pfn;
 	/* number of VMIDs */
 	unsigned				nvm;

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	unsigned i;
 
 	/* check if the id is still valid */
-	if (vm_id->id && vm_id->last_id_use &&
-	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
-		trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
-		return 0;
+	if (vm_id->id) {
+		unsigned id = vm_id->id;
+		long owner;
+
+		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+		if (owner == (long)vm) {
+			trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
+			return 0;
+		}
 	}
 
 	/* we definately need to flush */
@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 	/* skip over VMID 0, since it is the system VM */
 	for (i = 1; i < adev->vm_manager.nvm; ++i) {
-		struct fence *fence = adev->vm_manager.active[i];
+		struct fence *fence = adev->vm_manager.ids[i].active;
 		struct amdgpu_ring *fring;
 
 		if (fence == NULL) {
@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (choices[i]) {
 			struct fence *fence;
 
-			fence  = adev->vm_manager.active[choices[i]];
+			fence  = adev->vm_manager.ids[choices[i]].active;
 			vm_id->id = choices[i];
 
 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
@@ -246,11 +251,9 @@ void amdgpu_vm_fence(struct amdgpu_device *adev,
 	struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
 	unsigned vm_id = vm->ids[ring->idx].id;
 
-	fence_put(adev->vm_manager.active[vm_id]);
-	adev->vm_manager.active[vm_id] = fence_get(fence);
-
-	fence_put(vm->ids[ring->idx].last_id_use);
-	vm->ids[ring->idx].last_id_use = fence_get(fence);
+	fence_put(adev->vm_manager.ids[vm_id].active);
+	adev->vm_manager.ids[vm_id].active = fence_get(fence);
+	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
 }
 
 /**
@@ -1238,7 +1241,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		vm->ids[i].id = 0;
 		vm->ids[i].flushed_updates = NULL;
-		vm->ids[i].last_id_use = NULL;
 	}
 	mutex_init(&vm->mutex);
 	vm->va = RB_ROOT;
@@ -1312,8 +1314,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	fence_put(vm->page_directory_fence);
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		unsigned id = vm->ids[i].id;
+
+		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
+				    (long)vm, 0);
 		fence_put(vm->ids[i].flushed_updates);
-		fence_put(vm->ids[i].last_id_use);
 	}
 
 	mutex_destroy(&vm->mutex);
@@ -1331,5 +1336,5 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 	unsigned i;
 
 	for (i = 0; i < AMDGPU_NUM_VM; ++i)
-		fence_put(adev->vm_manager.active[i]);
+		fence_put(adev->vm_manager.ids[i].active);
 }
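
A note on the teardown path in amdgpu_vm_fini: the owner field is cleared with atomic_long_cmpxchg rather than a plain store because, by the time a VM is destroyed, another VM may already have grabbed the same VMID and written its own pointer into the slot; an unconditional store would wipe that newer owner and break its "id still valid" check in amdgpu_vm_grab_id. The cmpxchg only clears the slot if this VM is still the recorded owner.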