@@ -4803,7 +4803,7 @@ struct bonaire_mqd
  */
 static int cik_cp_compute_resume(struct radeon_device *rdev)
 {
-	int r, i, idx;
+	int r, i, j, idx;
 	u32 tmp;
 	bool use_doorbell = true;
 	u64 hqd_gpu_addr;
@@ -4922,7 +4922,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
 		mqd->queue_state.cp_hqd_pq_wptr= 0;
 		if (RREG32(CP_HQD_ACTIVE) & 1) {
 			WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
-			for (i = 0; i < rdev->usec_timeout; i++) {
+			for (j = 0; j < rdev->usec_timeout; j++) {
 				if (!(RREG32(CP_HQD_ACTIVE) & 1))
 					break;
 				udelay(1);
@@ -7751,17 +7751,17 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
 	wptr = RREG32(IH_RB_WPTR);
 
 	if (wptr & RB_OVERFLOW) {
+		wptr &= ~RB_OVERFLOW;
 		/* When a ring buffer overflow happen start parsing interrupt
 		 * from the last not overwritten vector (wptr + 16). Hopefully
 		 * this should allow us to catchup.
 		 */
-		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
-		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
@@ -8251,6 +8251,7 @@ restart_ih:
 		/* wptr/rptr are in bytes! */
 		rptr += 16;
 		rptr &= rdev->ih.ptr_mask;
+		WREG32(IH_RB_RPTR, rptr);
 	}
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
@@ -8259,7 +8260,6 @@ restart_ih:
 	if (queue_thermal)
 		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
-	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	atomic_set(&rdev->ih.lock, 0);
 
 	/* make sure wptr hasn't changed while processing */