@@ -1371,7 +1371,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 
 	if (adev->gfx.mec.hpd_eop_obj == NULL) {
 		r = amdgpu_bo_create(adev,
-				     adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
+				     adev->gfx.mec.num_queue * MEC_HPD_SIZE,
 				     PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
 				     &adev->gfx.mec.hpd_eop_obj);
@@ -1400,7 +1400,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
+	memset(hpd, 0, adev->gfx.mec.num_queue * MEC_HPD_SIZE);
 
 	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
 	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
@@ -4763,34 +4763,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
 	u32 *buf;
 	struct vi_mqd *mqd;
 
-	/* init the pipes */
-	mutex_lock(&adev->srbm_mutex);
-	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
-		int me = (i < 4) ? 1 : 2;
-		int pipe = (i < 4) ? i : (i - 4);
-
-		eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
-		eop_gpu_addr >>= 8;
-
-		vi_srbm_select(adev, me, pipe, 0, 0);
-
-		/* write the EOP addr */
-		WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr);
-		WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
-
-		/* set the VMID assigned */
-		WREG32(mmCP_HQD_VMID, 0);
-
-		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
-		tmp = RREG32(mmCP_HQD_EOP_CONTROL);
-		tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
-				    (order_base_2(MEC_HPD_SIZE / 4) - 1));
-		WREG32(mmCP_HQD_EOP_CONTROL, tmp);
-	}
-	vi_srbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
-
-	/* init the queues. Just two for now. */
+	/* init the queues. */
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
@@ -4842,6 +4815,22 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
			       ring->pipe,
			       ring->queue, 0);
 
+		eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
+		eop_gpu_addr >>= 8;
+
+		/* write the EOP addr */
+		WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr);
+		WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
+
+		/* set the VMID assigned */
+		WREG32(mmCP_HQD_VMID, 0);
+
+		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+		tmp = RREG32(mmCP_HQD_EOP_CONTROL);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
+				    (order_base_2(MEC_HPD_SIZE / 4) - 1));
+		WREG32(mmCP_HQD_EOP_CONTROL, tmp);
+
 		/* disable wptr polling */
 		tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
 		tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
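
For reference (not part of the patch), below is a minimal userspace sketch of how the EOP_SIZE value programmed above is decoded, assuming MEC_HPD_SIZE is 2048 bytes as defined in gfx_v8_0.c; order_base_2() here is only a stand-in for the kernel helper of the same name.

#include <stdio.h>

#define MEC_HPD_SIZE 2048	/* assumption: per-queue HPD/EOP buffer size in bytes */

/* userspace stand-in for the kernel's order_base_2(): ceil(log2(n)) */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int eop_dwords = MEC_HPD_SIZE / 4;		/* 512 dwords */
	unsigned int eop_size = order_base_2(eop_dwords) - 1;	/* field value: 8 */

	/* the CP decodes the field as 2^(EOP_SIZE + 1) dwords */
	printf("EOP_SIZE=%u -> %u dwords (%u bytes)\n",
	       eop_size, 1u << (eop_size + 1), (1u << (eop_size + 1)) * 4);
	return 0;
}

With a 2048-byte per-queue buffer, the field value works out to 8, and 2^(8+1) = 512 dwords (2048 bytes), which matches the per-queue num_queue * MEC_HPD_SIZE allocation made in gfx_v8_0_mec_init() above.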