@@ -3538,41 +3538,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
 						   pixel_size, enabled, scaled);
 }
 
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
-{
-	struct drm_i915_gem_object *ctx;
-	int ret;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	ctx = i915_gem_alloc_object(dev, 4096);
-	if (!ctx) {
-		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
-		return NULL;
-	}
-
-	ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
-	if (ret) {
-		DRM_ERROR("failed to pin power context: %d\n", ret);
-		goto err_unref;
-	}
-
-	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
-	if (ret) {
-		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
-		goto err_unpin;
-	}
-
-	return ctx;
-
-err_unpin:
-	i915_gem_object_ggtt_unpin(ctx);
-err_unref:
-	drm_gem_object_unreference(&ctx->base);
-	return NULL;
-}
-
 /**
  * Lock protecting IPS related data structures
  */
@@ -4989,124 +4954,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-void ironlake_teardown_rc6(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->ips.renderctx) {
-		i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
-		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
-		dev_priv->ips.renderctx = NULL;
-	}
-
-	if (dev_priv->ips.pwrctx) {
-		i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
-		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
-		dev_priv->ips.pwrctx = NULL;
-	}
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (I915_READ(PWRCTXA)) {
-		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
-		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
-		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
-			 50);
-
-		I915_WRITE(PWRCTXA, 0);
-		POSTING_READ(PWRCTXA);
-
-		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-		POSTING_READ(RSTDBYCTL);
-	}
-}
-
-static int ironlake_setup_rc6(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->ips.renderctx == NULL)
-		dev_priv->ips.renderctx = intel_alloc_context_page(dev);
-	if (!dev_priv->ips.renderctx)
-		return -ENOMEM;
-
-	if (dev_priv->ips.pwrctx == NULL)
-		dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
-	if (!dev_priv->ips.pwrctx) {
-		ironlake_teardown_rc6(dev);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void ironlake_enable_rc6(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
-	bool was_interruptible;
-	int ret;
-
-	/* rc6 disabled by default due to repeated reports of hanging during
-	 * boot and resume.
-	 */
-	if (!intel_enable_rc6(dev))
-		return;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	ret = ironlake_setup_rc6(dev);
-	if (ret)
-		return;
-
-	was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
-
-	/*
-	 * GPU can automatically power down the render unit if given a page
-	 * to save state.
-	 */
-	ret = intel_ring_begin(ring, 6);
-	if (ret) {
-		ironlake_teardown_rc6(dev);
-		dev_priv->mm.interruptible = was_interruptible;
-		return;
-	}
-
-	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
-			MI_MM_SPACE_GTT |
-			MI_SAVE_EXT_STATE_EN |
-			MI_RESTORE_EXT_STATE_EN |
-			MI_RESTORE_INHIBIT);
-	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_advance(ring);
-
-	/*
-	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
-	 * does an implicit flush, combined with MI_FLUSH above, it should be
-	 * safe to assume that renderctx is valid
-	 */
-	ret = intel_ring_idle(ring);
-	dev_priv->mm.interruptible = was_interruptible;
-	if (ret) {
-		DRM_ERROR("failed to enable ironlake power savings\n");
-		ironlake_teardown_rc6(dev);
-		return;
-	}
-
-	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-
-	intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
-}
-
 static unsigned long intel_pxfreq(u32 vidfreq)
 {
 	unsigned long freq;
@@ -5654,7 +5501,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
 
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_disable_drps(dev);
-		ironlake_disable_rc6(dev);
 	} else if (INTEL_INFO(dev)->gen >= 6) {
 		intel_suspend_gt_powersave(dev);
 
@@ -5725,7 +5571,6 @@ void intel_enable_gt_powersave(struct drm_device *dev)
 	if (IS_IRONLAKE_M(dev)) {
 		mutex_lock(&dev->struct_mutex);
 		ironlake_enable_drps(dev);
-		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
 		mutex_unlock(&dev->struct_mutex);
 	} else if (INTEL_INFO(dev)->gen >= 6) {
|