Browse code

drm/amdgpu: use a global LRU list for VMIDs

With the scheduler enabled, managing per-ring LRUs doesn't
make much sense any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Christian König 9 years ago
parent
current commit
a9a78b329a

+ 11 - 8
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -925,18 +925,20 @@ struct amdgpu_vm {
 	spinlock_t		freed_lock;
 	spinlock_t		freed_lock;
 };
 };
 
 
+struct amdgpu_vm_manager_id {
+	struct list_head	list;
+	struct fence		*active;
+	atomic_long_t		owner;
+};
+
 struct amdgpu_vm_manager {
 struct amdgpu_vm_manager {
-	/* protecting IDs */
+	/* Handling of VMIDs */
 	struct mutex				lock;
 	struct mutex				lock;
-
-	struct {
-		struct fence	*active;
-		atomic_long_t	owner;
-	} ids[AMDGPU_NUM_VM];
+	unsigned				num_ids;
+	struct list_head			ids_lru;
+	struct amdgpu_vm_manager_id		ids[AMDGPU_NUM_VM];
 
 
 	uint32_t				max_pfn;
 	uint32_t				max_pfn;
-	/* number of VMIDs */
-	unsigned				nvm;
 	/* vram base address for page table entry  */
 	/* vram base address for page table entry  */
 	u64					vram_base_offset;
 	u64					vram_base_offset;
 	/* is vm enabled? */
 	/* is vm enabled? */
@@ -946,6 +948,7 @@ struct amdgpu_vm_manager {
 	struct amdgpu_ring                      *vm_pte_funcs_ring;
 	struct amdgpu_ring                      *vm_pte_funcs_ring;
 };
 };
 
 
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);

+ 40 - 48
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -161,79 +161,52 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct fence *fence)
 		      struct amdgpu_sync *sync, struct fence *fence)
 {
 {
-	struct fence *best[AMDGPU_MAX_RINGS] = {};
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_device *adev = ring->adev;
-
-	unsigned choices[2] = {};
-	unsigned i;
+	struct amdgpu_vm_manager_id *id;
+	int r;
 
 
 	mutex_lock(&adev->vm_manager.lock);
 	mutex_lock(&adev->vm_manager.lock);
 
 
 	/* check if the id is still valid */
 	/* check if the id is still valid */
 	if (vm_id->id) {
 	if (vm_id->id) {
-		unsigned id = vm_id->id;
 		long owner;
 		long owner;
 
 
-		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+		id = &adev->vm_manager.ids[vm_id->id];
+		owner = atomic_long_read(&id->owner);
 		if (owner == (long)vm) {
 		if (owner == (long)vm) {
+			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
 			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
 			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
-			fence_put(adev->vm_manager.ids[id].active);
-			adev->vm_manager.ids[id].active = fence_get(fence);
-			mutex_unlock(&adev->vm_manager.lock);
-			return 0;
-		}
-	}
 
 
-	/* we definately need to flush */
-	vm_id->pd_gpu_addr = ~0ll;
+			fence_put(id->active);
+			id->active = fence_get(fence);
 
 
-	/* skip over VMID 0, since it is the system VM */
-	for (i = 1; i < adev->vm_manager.nvm; ++i) {
-		struct fence *fence = adev->vm_manager.ids[i].active;
-		struct amdgpu_ring *fring;
-
-		if (fence == NULL) {
-			/* found a free one */
-			vm_id->id = i;
-			trace_amdgpu_vm_grab_id(vm, i, ring->idx);
 			mutex_unlock(&adev->vm_manager.lock);
 			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 			return 0;
 		}
 		}
-
-		fring = amdgpu_ring_from_fence(fence);
-		if (best[fring->idx] == NULL ||
-		    fence_is_later(best[fring->idx], fence)) {
-			best[fring->idx] = fence;
-			choices[fring == ring ? 0 : 1] = i;
-		}
 	}
 	}
 
 
-	for (i = 0; i < 2; ++i) {
-		struct fence *active;
-		int r;
-
-		if (!choices[i])
-			continue;
+	/* we definately need to flush */
+	vm_id->pd_gpu_addr = ~0ll;
 
 
-		vm_id->id = choices[i];
-		active  = adev->vm_manager.ids[vm_id->id].active;
-		r = amdgpu_sync_fence(ring->adev, sync, active);
+	id = list_first_entry(&adev->vm_manager.ids_lru,
+			      struct amdgpu_vm_manager_id,
+			      list);
+	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+	atomic_long_set(&id->owner, (long)vm);
 
 
-		trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
-		atomic_long_set(&adev->vm_manager.ids[vm_id->id].owner, (long)vm);
+	vm_id->id = id - adev->vm_manager.ids;
+	trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
 
 
-		fence_put(adev->vm_manager.ids[vm_id->id].active);
-		adev->vm_manager.ids[vm_id->id].active = fence_get(fence);
+	r = amdgpu_sync_fence(ring->adev, sync, id->active);
 
 
-		mutex_unlock(&adev->vm_manager.lock);
-		return r;
+	if (!r) {
+		fence_put(id->active);
+		id->active = fence_get(fence);
 	}
 	}
 
 
-	/* should never happen */
-	BUG();
 	mutex_unlock(&adev->vm_manager.lock);
 	mutex_unlock(&adev->vm_manager.lock);
-	return -EINVAL;
+	return r;
 }
 }
 
 
 /**
 /**
@@ -1358,6 +1331,25 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 
 }
 }
 
 
+/**
+ * amdgpu_vm_manager_init - init the VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VM manager structures
+ */
+void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
+
+	/* skip over VMID 0, since it is the system VM */
+	for (i = 1; i < adev->vm_manager.num_ids; ++i)
+		list_add_tail(&adev->vm_manager.ids[i].list,
+			      &adev->vm_manager.ids_lru);
+}
+
 /**
 /**
  * amdgpu_vm_manager_fini - cleanup VM manager
  * amdgpu_vm_manager_fini - cleanup VM manager
  *
  *

+ 2 - 1
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -694,7 +694,8 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 * amdkfd will use VMIDs 8-15
 	 */
 	 */
-	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	amdgpu_vm_manager_init(adev);
 
 
 	/* base offset of vram pages */
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU) {
 	if (adev->flags & AMD_IS_APU) {

+ 2 - 1
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -774,7 +774,8 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 * amdkfd will use VMIDs 8-15
 	 */
 	 */
-	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	amdgpu_vm_manager_init(adev);
 
 
 	/* base offset of vram pages */
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU) {
 	if (adev->flags & AMD_IS_APU) {