@@ -495,6 +495,22 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
 
+static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 3; /* si_dma_ring_emit_ib */
+}
+
+static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		3 + /* si_dma_ring_emit_hdp_flush */
+		3 + /* si_dma_ring_emit_hdp_invalidate */
+		6 + /* si_dma_ring_emit_pipeline_sync */
+		12 + /* si_dma_ring_emit_vm_flush */
+		9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+}
+
 static int si_dma_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -778,6 +794,8 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
 	.test_ib = si_dma_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = si_dma_ring_pad_ib,
+	.get_emit_ib_size = si_dma_ring_get_emit_ib_size,
+	.get_dma_frame_size = si_dma_ring_get_dma_frame_size,
 };
 
 static void si_dma_set_ring_funcs(struct amdgpu_device *adev)