Эх сурвалжийг харах

Merge tag 'drm-intel-fixes-2014-02-14' of ssh://git.freedesktop.org/git/drm-intel into drm-fixes

3 fixes plus 1 prep patch, all four cc: stable. Jani will take over from
here and the plan is that he'll do 3.14-fixes for the entire release just
to work things out a bit.

* tag 'drm-intel-fixes-2014-02-14' of ssh://git.freedesktop.org/git/drm-intel:
  drm/i915/dp: add native aux defer retry limit
  drm/i915/dp: increase native aux defer retry timeout
  drm/i915: Prevent MI_DISPLAY_FLIP straddling two cachelines on IVB
  drm/i915: Add intel_ring_cachline_align()
Dave Airlie 11 жил өмнө
parent
commit
42738c2b39

+ 14 - 0
drivers/gpu/drm/i915/intel_display.c

@@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	if (ring->id == RCS)
 		len += 6;
 
+	/*
+	 * BSpec MI_DISPLAY_FLIP for IVB:
+	 * "The full packet must be contained within the same cache line."
+	 *
+	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+	 * cacheline, if we ever start emitting more commands before
+	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
+	 * then do the cacheline alignment, and finally emit the
+	 * MI_DISPLAY_FLIP.
+	 */
+	ret = intel_ring_cacheline_align(ring);
+	if (ret)
+		goto err_unpin;
+
 	ret = intel_ring_begin(ring, len);
 	if (ret)
 		goto err_unpin;

+ 13 - 6
drivers/gpu/drm/i915/intel_dp.c

@@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
 	uint8_t	msg[20];
 	int msg_bytes;
 	uint8_t	ack;
+	int retry;
 
 	if (WARN_ON(send_bytes > 16))
 		return -E2BIG;
@@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
 	msg[3] = send_bytes - 1;
 	memcpy(&msg[4], send, send_bytes);
 	msg_bytes = send_bytes + 4;
-	for (;;) {
+	for (retry = 0; retry < 7; retry++) {
 		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
 		if (ret < 0)
 			return ret;
 		ack >>= 4;
 		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
-			break;
+			return send_bytes;
 		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
-			udelay(100);
+			usleep_range(400, 500);
 		else
 			return -EIO;
 	}
-	return send_bytes;
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EIO;
 }
 
 /* Write a single byte to the aux channel in native mode */
@@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
 	int reply_bytes;
 	uint8_t ack;
 	int ret;
+	int retry;
 
 	if (WARN_ON(recv_bytes > 19))
 		return -E2BIG;
@@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
 	msg_bytes = 4;
 	reply_bytes = recv_bytes + 1;
 
-	for (;;) {
+	for (retry = 0; retry < 7; retry++) {
 		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
 				      reply, reply_bytes);
 		if (ret == 0)
@@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
 			return ret - 1;
 		}
 		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
-			udelay(100);
+			usleep_range(400, 500);
 		else
 			return -EIO;
 	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EIO;
 }
 
 static int

+ 21 - 0
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+{
+	int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+	int ret;
+
+	if (num_dwords == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, num_dwords);
+	if (ret)
+		return ret;
+
+	while (num_dwords--)
+		intel_ring_emit(ring, MI_NOOP);
+
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;

+ 1 - 0
drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 				   u32 data)
 {