|
@@ -7896,6 +7896,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
|
|
return 0;
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
|
|
|
|
+ u32 request, u32 reply_mask, u32 reply,
|
|
|
|
+ u32 *status)
|
|
|
|
+{
|
|
|
|
+ u32 val = request;
|
|
|
|
+
|
|
|
|
+ *status = sandybridge_pcode_read(dev_priv, mbox, &val);
|
|
|
|
+
|
|
|
|
+ return *status || ((val & reply_mask) == reply);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @dev_priv: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and if this times out for another 10 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	/* Caller must already hold the RPS hardware lock. */
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/*
	 * COND evaluates to true when polling must stop: the helper reports
	 * either a mailbox error (recorded in @status) or the expected
	 * acknowledgment. Defined as a macro so it can be handed to the
	 * _wait_for()/wait_for_atomic() polling macros below.
	 */
#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
				   &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
	 * account for interrupts that could reduce the number of these
	 * requests.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	/* Atomic polling is only meant for very short base timeouts. */
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 10);
	preempt_enable();

out:
	/*
	 * On timeout propagate ret (-ETIMEDOUT); otherwise return whatever
	 * status the last mailbox access reported (0 or a PCODE error).
	 */
	return ret ? ret : status;
#undef COND
}
|
|
|
|
+
|
|
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
|
|
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
|
|
{
|
|
{
|
|
/*
|
|
/*
|