Browse code

drm/amdgpu: use separate scheduler entity for UVD submissions

This allows us to remove the kernel context and use a better
priority for the submissions.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Christian König, 9 years ago
Parent
Current commit
ead833eced
2 changed files with 15 additions and 1 deletion
  1. 1 0
      drivers/gpu/drm/amd/amdgpu/amdgpu.h
  2. 14 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1644,6 +1644,7 @@ struct amdgpu_uvd {
 	struct amdgpu_ring	ring;
 	struct amdgpu_irq_src	irq;
 	bool			address_64_bit;
+	struct amd_sched_entity entity;
 };
 
 /*

+ 14 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -91,6 +91,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 {
+	struct amdgpu_ring *ring;
+	struct amd_sched_rq *rq;
 	unsigned long bo_size;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
@@ -191,6 +193,15 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
 
+	ring = &adev->uvd.ring;
+	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+	r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
+				  rq, amdgpu_sched_jobs);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up UVD run queue.\n");
+		return r;
+	}
+
 	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
 		atomic_set(&adev->uvd.handles[i], 0);
 		adev->uvd.filp[i] = NULL;
@@ -210,6 +221,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
+	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
+
 	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
 	if (!r) {
 		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
@@ -880,7 +893,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
 		amdgpu_job_free(job);
 	} else {
-		r = amdgpu_job_submit(job, ring, NULL,
+		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err_free;