@@ -3750,7 +3750,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 /* gen6_set_rps is called to update the frequency request, but should also be
  * called when the range (min_delay and max_delay) is modified so that we can
  * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -3786,6 +3786,27 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	trace_intel_gpu_freq_change(val * 50);
 }
 
+static void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+
+	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+		      "Odd GPU freq value\n"))
+		val &= ~1;
+
+	if (val != dev_priv->rps.cur_freq)
+		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+
+	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
+	dev_priv->rps.cur_freq = val;
+	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+}
+
 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
  *
  * * If Gfx is Idle, then
@@ -3850,38 +3871,20 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 
 void gen6_rps_boost(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
-
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
-		if (IS_VALLEYVIEW(dev))
-			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		else
-			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+		intel_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-void valleyview_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_device *dev, u8 val)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
-	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
-
-	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
-		      "Odd GPU freq value\n"))
-		val &= ~1;
-
-	if (val != dev_priv->rps.cur_freq)
-		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-
-	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
-	dev_priv->rps.cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+	if (IS_VALLEYVIEW(dev))
+		valleyview_set_rps(dev, val);
+	else
+		gen6_set_rps(dev, val);
 }
 
 static void gen9_disable_rps(struct drm_device *dev)
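
For readers who want the shape of this refactor in isolation, the following is a minimal, self-contained C sketch (the fake_dev struct, the is_valleyview flag, and the stub setter bodies are invented for illustration and are not i915 driver code). It shows the pattern the patch introduces: the per-platform setters become static, and callers go through one wrapper, the way intel_set_rps() wraps gen6_set_rps() and valleyview_set_rps() above.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct drm_device / dev_priv; illustrative only. */
struct fake_dev {
	bool is_valleyview;
	unsigned char cur_freq;
};

/* Platform-specific setters are now internal (static) details. */
static void gen6_style_set_rps(struct fake_dev *dev, unsigned char val)
{
	dev->cur_freq = val;
	printf("gen6 path: requested freq %u\n", val);
}

static void vlv_style_set_rps(struct fake_dev *dev, unsigned char val)
{
	dev->cur_freq = val;
	printf("valleyview path: requested freq %u\n", val);
}

/* Single entry point, analogous to intel_set_rps() in the diff. */
static void set_rps(struct fake_dev *dev, unsigned char val)
{
	if (dev->is_valleyview)
		vlv_style_set_rps(dev, val);
	else
		gen6_style_set_rps(dev, val);
}

int main(void)
{
	struct fake_dev dev = { .is_valleyview = true, .cur_freq = 0 };

	/* Callers no longer branch on the platform themselves. */
	set_rps(&dev, 10);
	return 0;
}

The payoff, visible in gen6_rps_boost() above, is that call sites shrink to a single line and any future platform split only has to be handled inside the wrapper.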