@@ -970,6 +970,11 @@ static int gfx_v9_0_sw_init(void *handle)
 	struct amdgpu_kiq *kiq;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* KIQ event */
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
+	if (r)
+		return r;
+
 	/* EOP Event */
 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
 	if (r)
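For orientation between the two hunks: the IH ring decodes each interrupt vector into an amdgpu_iv_entry and routes it to the amdgpu_irq_src registered for its (client, source) pair, here (AMDGPU_IH_CLIENTID_GRBM_CP, 178), whose ->process() hook is the gfx_v9_0_kiq_irq() handler added below. A minimal sketch of that dispatch, paraphrasing the generic amdgpu_irq_dispatch() path; the lookup fields are assumptions, not code from this patch:

	/* Hedged sketch of the generic IH dispatch (lookup fields assumed): */
	static void kiq_dispatch_sketch(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
	{
		struct amdgpu_irq_src *src =
			adev->irq.client[entry->client_id].sources[entry->src_id];

		/* For (GRBM_CP, 178) this ends up in gfx_v9_0_kiq_irq(). */
		if (src && src->funcs && src->funcs->process)
			src->funcs->process(adev, src, entry);
	}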
@@ -2994,6 +2999,72 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
+					    struct amdgpu_irq_src *src,
+					    unsigned int type,
+					    enum amdgpu_interrupt_state state)
+{
+	uint32_t tmp, target;
+	struct amdgpu_ring *ring = (struct amdgpu_ring *)src->data;
+
+	BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ));
+
+	if (ring->me == 1)
+		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
+	else
+		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
+	target += ring->pipe;
+
+	switch (type) {
+	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
+		if (state == AMDGPU_IRQ_STATE_DISABLE) {
+			tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL));
+			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
+					    GENERIC2_INT_ENABLE, 0);
+			WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), tmp);
+
+			tmp = RREG32(target);
+			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
+					    GENERIC2_INT_ENABLE, 0);
+			WREG32(target, tmp);
+		} else {
+			tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL));
+			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
+					    GENERIC2_INT_ENABLE, 1);
+			WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), tmp);
+
+			tmp = RREG32(target);
+			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
+					    GENERIC2_INT_ENABLE, 1);
+			WREG32(target, tmp);
+		}
+		break;
+	default:
+		BUG(); /* KIQ only supports GENERIC2_INT for now */
+		break;
+	}
+	return 0;
+}
+
+static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
+			    struct amdgpu_irq_src *source,
+			    struct amdgpu_iv_entry *entry)
+{
+	u8 me_id, pipe_id, queue_id;
+	struct amdgpu_ring *ring = (struct amdgpu_ring *)source->data;
+
+	BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ));
+
+	me_id = (entry->ring_id & 0x0c) >> 2;
+	pipe_id = (entry->ring_id & 0x03) >> 0;
+	queue_id = (entry->ring_id & 0x70) >> 4;
+	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
+		  me_id, pipe_id, queue_id);
+
+	amdgpu_fence_process(ring);
+	return 0;
+}
+
 const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
 	.name = "gfx_v9_0",
 	.early_init = gfx_v9_0_early_init,
@@ -3114,6 +3185,11 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
 		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
 }
 
+static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
+	.set = gfx_v9_0_kiq_set_interrupt_state,
+	.process = gfx_v9_0_kiq_irq,
+};
+
 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
 	.set = gfx_v9_0_set_eop_interrupt_state,
 	.process = gfx_v9_0_eop_irq,
@@ -3139,6 +3215,9 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 
 	adev->gfx.priv_inst_irq.num_types = 1;
 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
+
+	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
+	adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
 }
 
 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
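Usage note: with the source registered and its funcs wired up above, KIQ ring bring-up would arm the interrupt through the existing amdgpu_irq_get()/amdgpu_irq_put() helpers, which land in gfx_v9_0_kiq_set_interrupt_state(). The call site below is an assumption for illustration, not part of this patch:

	/* Assumed call site: unmask the KIQ GENERIC2 interrupt at ring init... */
	r = amdgpu_irq_get(adev, &adev->gfx.kiq.irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
	if (r)
		return r;

	/* ...and mask it again on teardown. */
	amdgpu_irq_put(adev, &adev->gfx.kiq.irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);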