@@ -402,14 +402,21 @@ static void radeon_flip_work_func(struct work_struct *__work)
 
 	down_read(&rdev->exclusive_lock);
 	if (work->fence) {
-		r = radeon_fence_wait(work->fence, false);
-		if (r == -EDEADLK) {
-			up_read(&rdev->exclusive_lock);
-			do {
-				r = radeon_gpu_reset(rdev);
-			} while (r == -EAGAIN);
-			down_read(&rdev->exclusive_lock);
-		}
+		struct radeon_fence *fence;
+
+		fence = to_radeon_fence(work->fence);
+		if (fence && fence->rdev == rdev) {
+			r = radeon_fence_wait(fence, false);
+			if (r == -EDEADLK) {
+				up_read(&rdev->exclusive_lock);
+				do {
+					r = radeon_gpu_reset(rdev);
+				} while (r == -EAGAIN);
+				down_read(&rdev->exclusive_lock);
+			}
+		} else
+			r = fence_wait(work->fence, false);
+
 		if (r)
 			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
 
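Note: the hunk above depends on the to_radeon_fence() helper to tell
radeon-owned fences apart from fences created by other drivers; a
foreign fence must be waited on through the generic fence_wait() path
instead of radeon_fence_wait(). A minimal sketch of that helper,
assuming the container_of() layout used by radeon's fence code (the
authoritative definition lives in radeon.h):

	static inline struct radeon_fence *to_radeon_fence(struct fence *f)
	{
		struct radeon_fence *fence = container_of(f, struct radeon_fence, base);

		/* A fence from another driver has different ops;
		 * return NULL so the caller falls back to fence_wait(). */
		if (f->ops != &radeon_fence_ops)
			return NULL;

		return fence;
	}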
@@ -418,7 +425,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
 		 * confused about which BO the CRTC is scanning out
 		 */
 
-		radeon_fence_unref(&work->fence);
+		fence_put(work->fence);
+		work->fence = NULL;
 	}
 
 	/* We borrow the event spin lock for protecting flip_status */
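Note: radeon_fence_unref() clears the caller's pointer as a side
effect, while the generic fence_put() only drops the reference, which
is why the hunk above adds an explicit work->fence = NULL. Roughly, a
sketch of the unref helper's assumed shape after this conversion:

	void radeon_fence_unref(struct radeon_fence **fence)
	{
		struct radeon_fence *tmp = *fence;

		*fence = NULL;			/* clear the caller's pointer... */
		if (tmp)
			fence_put(&tmp->base);	/* ...then drop the reference */
	}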
@@ -494,7 +502,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	work->fence = (struct radeon_fence *)fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
@@ -576,7 +584,7 @@ pflip_cleanup:
 
 cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	radeon_fence_unref(&work->fence);
+	fence_put(work->fence);
 	kfree(work);
 	return r;
 }
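Note: the cleanup path can call fence_put() unconditionally because the
generic helper is a no-op on NULL, so no extra check is needed when the
flip fails before a fence reference was taken. A sketch of the generic
put, assuming the kref-based layout from include/linux/fence.h:

	static inline void fence_put(struct fence *fence)
	{
		/* Safe on NULL: only drop the refcount if a fence exists. */
		if (fence)
			kref_put(&fence->refcount, fence_release);
	}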