@@ -109,9 +109,20 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
 	}
 }
 
+static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
+{
+	/* FIXME: spreading the queues across pipes causes perf regressions
+	 * on POLARIS11 compute workloads */
+	if (adev->asic_type == CHIP_POLARIS11)
+		return false;
+
+	return adev->gfx.mec.num_mec > 1;
+}
+
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 {
 	int i, queue, pipe, mec;
+	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
 
 	/* policy for amdgpu compute queue ownership */
 	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
@@ -125,8 +136,7 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 		if (mec >= adev->gfx.mec.num_mec)
 			break;
 
-		/* FIXME: spreading the queues across pipes causes perf regressions */
-		if (0) {
+		if (multipipe_policy) {
 			/* policy: amdgpu owns the first two queues of the first MEC */
 			if (mec == 0 && queue < 2)
 				set_bit(i, adev->gfx.mec.queue_bitmap);