@@ -3905,7 +3905,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 
-	/* EVENT_WRITE_EOP - flush caches, send int */
+	/* Workaround for cache flush problems. First send a dummy EOP
+	 * event down the pipe with seq one below.
+	 */
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+				 EOP_TC_ACTION_EN |
+				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+				 EVENT_INDEX(5)));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+				DATA_SEL(1) | INT_SEL(0));
+	radeon_ring_write(ring, fence->seq - 1);
+	radeon_ring_write(ring, 0);
+
+	/* Then send the real EOP event down the pipe. */
 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
 				 EOP_TC_ACTION_EN |
@@ -7359,7 +7373,6 @@ int cik_irq_set(struct radeon_device *rdev)
 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 	u32 grbm_int_cntl = 0;
 	u32 dma_cntl, dma_cntl1;
-	u32 thermal_int;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -7389,13 +7402,6 @@ int cik_irq_set(struct radeon_device *rdev)
 
 	cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
 
-	if (rdev->flags & RADEON_IS_IGP)
-		thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
-			~(THERM_INTH_MASK | THERM_INTL_MASK);
-	else
-		thermal_int = RREG32_SMC(CG_THERMAL_INT) &
-			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
-
 	/* enable CP interrupts on all rings */
 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 		DRM_DEBUG("cik_irq_set: sw int gfx\n");
@@ -7499,14 +7505,6 @@ int cik_irq_set(struct radeon_device *rdev)
 		hpd6 |= DC_HPDx_INT_EN;
 	}
 
-	if (rdev->irq.dpm_thermal) {
-		DRM_DEBUG("dpm thermal\n");
-		if (rdev->flags & RADEON_IS_IGP)
-			thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
-		else
-			thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
-	}
-
 	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
 
 	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
@@ -7553,11 +7551,6 @@ int cik_irq_set(struct radeon_device *rdev)
 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
 
-	if (rdev->flags & RADEON_IS_IGP)
-		WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
-	else
-		WREG32_SMC(CG_THERMAL_INT, thermal_int);
-
 	return 0;
 }
 
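
As a reading aid, not part of the patch: a minimal sketch of how cik_fence_gfx_ring_emit() reads once the first hunk is applied, assembled from the hunk plus its surrounding context in drivers/gpu/drm/radeon/cik.c. It depends on the radeon driver's own headers, so it is illustrative only; the hunk's trailing context cuts off inside the second (real) EOP packet, so the DATA_SEL(1) | INT_SEL(2) words and the fence->seq write in that packet are assumed from the pre-existing code rather than shown above.

void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
			     struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* Dummy EOP event: flushes the TC/TCL1 caches and writes seq - 1,
	 * but raises no interrupt (INT_SEL(0)).
	 */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(1) | INT_SEL(0));
	radeon_ring_write(ring, fence->seq - 1);
	radeon_ring_write(ring, 0);

	/* Real EOP event: writes fence->seq and requests the interrupt
	 * (INT_SEL(2) assumed from the unchanged code below the hunk).
	 */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}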