@@ -2706,14 +2706,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
+
+	if (rdev->family >= CHIP_RV770)
+		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
 
 	if (rdev->wb.use_event) {
 		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 		/* flush read cache over gart */
 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-					PACKET3_VC_ACTION_ENA |
-					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(ring, cp_coher_cntl);
 		radeon_ring_write(ring, 0xFFFFFFFF);
 		radeon_ring_write(ring, 0);
 		radeon_ring_write(ring, 10); /* poll interval */
@@ -2727,9 +2730,7 @@
 	} else {
 		/* flush read cache over gart */
 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-					PACKET3_VC_ACTION_ENA |
-					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(ring, cp_coher_cntl);
 		radeon_ring_write(ring, 0xFFFFFFFF);
 		radeon_ring_write(ring, 0);
 		radeon_ring_write(ring, 10); /* poll interval */