|
@@ -3035,6 +3035,73 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
|
|
|
/* FIXME: implement */
|
|
|
}
|
|
|
|
|
|
+static void r600_uvd_init(struct radeon_device *rdev)
|
|
|
+{
|
|
|
+ int r;
|
|
|
+
|
|
|
+ if (!rdev->has_uvd)
|
|
|
+ return;
|
|
|
+
|
|
|
+ r = radeon_uvd_init(rdev);
|
|
|
+ if (r) {
|
|
|
+ dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
|
|
|
+ /*
|
|
|
+ * At this point rdev->uvd.vcpu_bo is NULL which trickles down
|
|
|
+ * to an early exit of uvd_v1_0_resume() and thus nothing happens
|
|
|
+ * there. So it is pointless to try to go through that code
|
|
|
+ * hence we disable uvd here.
|
|
|
+ */
|
|
|
+ rdev->has_uvd = 0;
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
|
|
|
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
|
|
|
+}
|
|
|
+
|
|
|
+static void r600_uvd_start(struct radeon_device *rdev)
|
|
|
+{
|
|
|
+ int r;
|
|
|
+
|
|
|
+ if (!rdev->has_uvd)
|
|
|
+ return;
|
|
|
+
|
|
|
+ r = uvd_v1_0_resume(rdev);
|
|
|
+ if (r) {
|
|
|
+ dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
|
|
|
+ goto error;
|
|
|
+ }
|
|
|
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
|
|
|
+ if (r) {
|
|
|
+ dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
|
|
|
+ goto error;
|
|
|
+ }
|
|
|
+ return;
|
|
|
+
|
|
|
+error:
|
|
|
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
|
|
|
+}
|
|
|
+
|
|
|
+static void r600_uvd_resume(struct radeon_device *rdev)
|
|
|
+{
|
|
|
+ struct radeon_ring *ring;
|
|
|
+ int r;
|
|
|
+
|
|
|
+ if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
|
|
|
+ return;
|
|
|
+
|
|
|
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
|
|
|
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
|
|
|
+ if (r) {
|
|
|
+ dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ r = uvd_v1_0_init(rdev);
|
|
|
+ if (r) {
|
|
|
+ dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
static int r600_startup(struct radeon_device *rdev)
|
|
|
{
|
|
|
struct radeon_ring *ring;
|
|
@@ -3070,17 +3137,7 @@ static int r600_startup(struct radeon_device *rdev)
|
|
|
return r;
|
|
|
}
|
|
|
|
|
|
- if (rdev->has_uvd) {
|
|
|
- r = uvd_v1_0_resume(rdev);
|
|
|
- if (!r) {
|
|
|
- r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
|
|
|
- if (r) {
|
|
|
- dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
|
|
|
- }
|
|
|
- }
|
|
|
- if (r)
|
|
|
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
|
|
|
- }
|
|
|
+ r600_uvd_start(rdev);
|
|
|
|
|
|
/* Enable IRQ */
|
|
|
if (!rdev->irq.installed) {
|
|
@@ -3110,17 +3167,7 @@ static int r600_startup(struct radeon_device *rdev)
|
|
|
if (r)
|
|
|
return r;
|
|
|
|
|
|
- if (rdev->has_uvd) {
|
|
|
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
|
|
|
- if (ring->ring_size) {
|
|
|
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
|
|
|
- RADEON_CP_PACKET2);
|
|
|
- if (!r)
|
|
|
- r = uvd_v1_0_init(rdev);
|
|
|
- if (r)
|
|
|
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
|
|
|
- }
|
|
|
- }
|
|
|
+ r600_uvd_resume(rdev);
|
|
|
|
|
|
r = radeon_ib_pool_init(rdev);
|
|
|
if (r) {
|
|
@@ -3264,13 +3311,7 @@ int r600_init(struct radeon_device *rdev)
|
|
|
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
|
|
|
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
|
|
|
|
|
|
- if (rdev->has_uvd) {
|
|
|
- r = radeon_uvd_init(rdev);
|
|
|
- if (!r) {
|
|
|
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
|
|
|
- r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
|
|
|
- }
|
|
|
- }
|
|
|
+ r600_uvd_init(rdev);
|
|
|
|
|
|
rdev->ih.ring_obj = NULL;
|
|
|
r600_ih_ring_init(rdev, 64 * 1024);
|