@@ -91,3 +91,61 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	ttm_eu_backoff_reservation(&ticket, &list);
 	return 0;
 }
+
+void amdgpu_virt_init_setting(struct amdgpu_device *adev)
+{
+	mutex_init(&adev->virt.lock);
+}
+
+uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+	signed long r;
+	uint32_t val;
+	struct dma_fence *f;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_rreg);
+
+	mutex_lock(&adev->virt.lock);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_hdp_flush(ring);
+	amdgpu_ring_emit_rreg(ring, reg);
+	amdgpu_ring_emit_hdp_invalidate(ring);
+	amdgpu_fence_emit(ring, &f);
+	amdgpu_ring_commit(ring);
+	mutex_unlock(&adev->virt.lock);
+
+	r = dma_fence_wait(f, false);
+	if (r)
+		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+	dma_fence_put(f);
+
+	val = adev->wb.wb[adev->virt.reg_val_offs];
+
+	return val;
+}
+
+void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+	signed long r;
+	struct dma_fence *f;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_wreg);
+
+	mutex_lock(&adev->virt.lock);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_hdp_flush(ring);
+	amdgpu_ring_emit_wreg(ring, reg, v);
+	amdgpu_ring_emit_hdp_invalidate(ring);
+	amdgpu_fence_emit(ring, &f);
+	amdgpu_ring_commit(ring);
+	mutex_unlock(&adev->virt.lock);
+
+	r = dma_fence_wait(f, false);
+	if (r)
+		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+	dma_fence_put(f);
+}
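
A note on how these helpers are likely consumed (a sketch under assumptions, not part of the hunk above): when the driver runs as an SR-IOV guest, direct MMIO register access is not always permitted, so a generic register accessor can fall back to the KIQ path added here. The amdgpu_sriov_runtime() check and the example_rreg() wrapper are illustrative assumptions; only amdgpu_virt_kiq_rreg()/amdgpu_virt_kiq_wreg() come from the patch.

	/*
	 * Illustrative caller, not part of the patch: route a register read
	 * through the KIQ helper when running as an SR-IOV guest at runtime,
	 * otherwise fall back to a plain MMIO read.  amdgpu_sriov_runtime()
	 * and the hook point are assumptions made for this sketch.
	 */
	static uint32_t example_rreg(struct amdgpu_device *adev, uint32_t reg)
	{
		if (amdgpu_sriov_runtime(adev))
			/* Read lands in the writeback slot via the KIQ ring. */
			return amdgpu_virt_kiq_rreg(adev, reg);

		return readl(((void __iomem *)adev->rmmio) + (reg * 4));
	}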