@@ -2608,8 +2608,24 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
 	uint32_t scratch, tmp = 0;
+	uint64_t queue_mask = 0;
 	int r, i;
 
+	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
+		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
+			continue;
+
+		/* This situation may be hit in the future if a new HW
+		 * generation exposes more than 64 queues. If so, the
+		 * definition of queue_mask needs updating */
+		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
+			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
+			break;
+		}
+
+		queue_mask |= (1ull << i);
+	}
+
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
 	if (r) {
 		DRM_ERROR("Failed to get scratch reg (%d).\n", r);
@@ -2628,8 +2644,8 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
-	amdgpu_ring_write(kiq_ring, 0x000000FF);	/* queue mask lo */
-	amdgpu_ring_write(kiq_ring, 0);	/* queue mask hi */
+	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
+	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
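
For reference, a minimal standalone C sketch of the mask-building pattern the first hunk introduces and the lo/hi split the second hunk consumes. MAX_QUEUES, bitmap_test() and the sample bitmap are hypothetical stand-ins for the kernel's AMDGPU_MAX_COMPUTE_QUEUES, test_bit() and adev->gfx.mec.queue_bitmap, and the 32-bit halves are extracted by hand rather than with the kernel's lower_32_bits()/upper_32_bits() helpers:

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES 128	/* hypothetical stand-in for AMDGPU_MAX_COMPUTE_QUEUES */

/* hypothetical stand-in for the kernel's test_bit() */
static int bitmap_test(const uint64_t *map, unsigned int bit)
{
	return (map[bit / 64] >> (bit % 64)) & 1;
}

int main(void)
{
	uint64_t queue_bitmap[(MAX_QUEUES + 63) / 64] = { 0xff };	/* queues 0-7 enabled */
	uint64_t queue_mask = 0;
	unsigned int i;

	for (i = 0; i < MAX_QUEUES; ++i) {
		if (!bitmap_test(queue_bitmap, i))
			continue;

		/* same guard as the patch: a 64-bit mask only
		 * describes queues 0..63 */
		if (i >= sizeof(queue_mask) * 8) {
			fprintf(stderr, "queue %u does not fit in the mask\n", i);
			break;
		}

		queue_mask |= 1ull << i;
	}

	/* the SET_RESOURCES packet carries the mask as two 32-bit dwords,
	 * which is why the ring writes split it into lo and hi halves */
	printf("queue mask lo: 0x%08x\n", (unsigned)(queue_mask & 0xffffffff));
	printf("queue mask hi: 0x%08x\n", (unsigned)(queue_mask >> 32));
	return 0;
}

With the sample bitmap this prints lo = 0x000000ff and hi = 0x00000000, i.e. exactly the hard-coded 0x000000FF/0 pair that the second hunk replaces with the computed values.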