@@ -1100,6 +1100,18 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0xE);
 }
 
+static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
+		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
 static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
 	uint32_t seq = ring->fence_drv.sync_seq;
@@ -1532,7 +1544,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.type = AMDGPU_RING_TYPE_UVD,
 	.align_mask = 0xf,
-	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.support_64bit_ptrs = false,
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
@@ -1548,7 +1559,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
 	.test_ring = uvd_v6_0_ring_test_ring,
 	.test_ib = amdgpu_uvd_ring_test_ib,
-	.insert_nop = amdgpu_ring_insert_nop,
+	.insert_nop = uvd_v6_0_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,