@@ -129,6 +129,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 {
 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+		adev->last_mm_index = v;
+	}
+
 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
 		BUG_ON(in_interrupt());
 		return amdgpu_virt_kiq_wreg(adev, reg, v);
@@ -144,6 +148,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 	}
+
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+		udelay(500);
+	}
 }
 
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
@@ -158,6 +166,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+		adev->last_mm_index = v;
+	}
 	if ((reg * 4) < adev->rio_mem_size)
 		iowrite32(v, adev->rio_mem + (reg * 4));
@@ -165,6 +176,10 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
 	}
+
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+		udelay(500);
+	}
 }
 
 /**
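
For context (not part of the patch): the added hunks latch the value written to register 0 and, when register 1 is then written while the latched value equals 0x5702C, insert a 500 microsecond delay on Vega10 and later ASICs. Below is a minimal, stubbed user-space sketch of that flow, assuming registers 0 and 1 correspond to mmMM_INDEX and mmMM_DATA (as the reg == 0 / reg == 1 checks suggest); fake_dev, sketch_wreg, delay_us and indexed_write are hypothetical names, and the CHIP_VEGA10 gate is omitted.

/* Illustration only -- not part of the patch. */
#include <stdint.h>

#define MM_INDEX 0u   /* assumed: index/select register (the reg == 0 case above) */
#define MM_DATA  1u   /* assumed: data register (the reg == 1 case above) */

struct fake_dev {
	uint32_t last_mm_index;   /* mirrors adev->last_mm_index */
	uint32_t regs[2];         /* stand-in for the two MMIO registers touched here */
};

static void delay_us(unsigned int us) { (void)us; }   /* stand-in for udelay() */

/* Mirrors the patched write path: latch on MM_INDEX, delay after MM_DATA
 * when the latched byte offset is 0x5702C. */
static void sketch_wreg(struct fake_dev *dev, uint32_t reg, uint32_t v)
{
	if (reg == MM_INDEX)              /* remember which offset was selected */
		dev->last_mm_index = v;

	dev->regs[reg] = v;               /* the ordinary register write */

	if (reg == MM_DATA && dev->last_mm_index == 0x5702C)
		delay_us(500);            /* give the hardware time to settle */
}

/* The caller-side pattern the workaround targets: an indexed write done
 * through two ordinary register writes. */
static void indexed_write(struct fake_dev *dev, uint32_t byte_offset, uint32_t v)
{
	sketch_wreg(dev, MM_INDEX, byte_offset);  /* e.g. 0x5702C */
	sketch_wreg(dev, MM_DATA, v);             /* delayed for that offset */
}

int main(void)
{
	struct fake_dev dev = { 0 };
	indexed_write(&dev, 0x5702C, 0x1);        /* takes the delayed path */
	return 0;
}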