@@ -903,6 +903,9 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
 void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		       unsigned vm_id, uint64_t pd_addr)
 {
+	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 	if (vm_id < 8) {
 		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
@@ -943,5 +946,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
 	radeon_ring_write(ring, 1 << vm_id);
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* reference */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
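
For reference: the POLL_REG_MEM packet appended after the invalidate request makes the SDMA engine read VM_INVALIDATE_REQUEST back before the flush is considered done; with SDMA_POLL_REG_MEM_EXTRA_FUNC(0) the compare function is "always", so the poll succeeds on the first read. The sketch below rebuilds the six dwords emitted by the new radeon_ring_write() calls as a standalone C program. The macro encodings and the VM_INVALIDATE_REQUEST offset are assumptions recalled from the radeon cikd.h header, not taken from this patch, so verify them against the real header.

#include <stdint.h>
#include <stdio.h>

/* Assumed encodings, mirroring the cikd.h macros used in the patch;
 * check the shifts and values against the real header. */
#define SDMA_PACKET(op, sub_op, e)	((((e) & 0xFFFF) << 16) |	\
					 (((sub_op) & 0xFF) << 8) |	\
					 ((op) & 0xFF))
#define SDMA_OPCODE_POLL_REG_MEM	8		/* assumption */
#define SDMA_POLL_REG_MEM_EXTRA_OP(x)	((x) << 10)	/* assumption: 0 = wait on reg/mem */
#define SDMA_POLL_REG_MEM_EXTRA_FUNC(x)	((x) << 12)	/* assumption: 0 = "always" */
#define VM_INVALIDATE_REQUEST		0x1478		/* assumption */

int main(void)
{
	uint32_t extra_bits = SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			      SDMA_POLL_REG_MEM_EXTRA_FUNC(0); /* always */

	/* The six dwords the patch appends after requesting the TLB invalidate. */
	uint32_t pkt[6] = {
		SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits),
		VM_INVALIDATE_REQUEST >> 2,	/* dword offset of the register to poll */
		0,				/* address high dword, unused for a register poll */
		0,				/* reference */
		0,				/* mask */
		(0xfff << 16) | 10,		/* retry count, poll interval */
	};

	for (int i = 0; i < 6; i++)
		printf("dword %d: 0x%08x\n", i, (unsigned)pkt[i]);
	return 0;
}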