@@ -48,6 +48,11 @@
 #include <linux/reservation.h>
 #include <linux/dma-buf.h>
 
+static bool is_mmio_work(struct intel_flip_work *work)
+{
+	return work->mmio_work.func;
+}
+
 /* Primary plane formats for gen <= 3 */
 static const uint32_t i8xx_primary_formats[] = {
 	DRM_FORMAT_C8,
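
The new helper doubles as the discriminator between the two flip paths:
INIT_WORK() stores the handler in work_struct.func, so mmio_work.func is
non-NULL exactly when the MMIO path has initialized it, assuming the work
item is zero-initialized on allocation (the allocation site is not part of
this excerpt). A trimmed sketch of the idea, not literal code from the patch:

	struct intel_flip_work {
		struct work_struct unpin_work;	/* both paths: deferred unpin */
		struct work_struct mmio_work;	/* MMIO path only */
		/* ... request, vblank counts, gtt_offset, rotation ... */
	};

	/* CS flip: mmio_work is never initialized, .func stays NULL */
	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);

	/* MMIO flip: initializing mmio_work is what makes is_mmio_work() true */
	INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
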
@@ -3113,7 +3118,7 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
 	struct intel_crtc *crtc;
 
 	for_each_intel_crtc(dev_priv->dev, crtc)
-		intel_finish_page_flip(dev_priv, crtc->pipe);
+		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
 }
 
 static void intel_update_primary_planes(struct drm_device *dev)
@@ -3215,7 +3220,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 		return false;
 
 	spin_lock_irq(&dev->event_lock);
-	pending = to_intel_crtc(crtc)->unpin_work != NULL;
+	pending = to_intel_crtc(crtc)->flip_work != NULL;
 	spin_unlock_irq(&dev->event_lock);
 
 	return pending;
@@ -3794,7 +3799,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 		if (atomic_read(&crtc->unpin_work_count) == 0)
 			continue;
 
-		if (crtc->unpin_work)
+		if (crtc->flip_work)
 			intel_wait_for_vblank(dev, crtc->pipe);
 
 		return true;
@@ -3806,9 +3811,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 static void page_flip_completed(struct intel_crtc *intel_crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-	struct intel_unpin_work *work = intel_crtc->unpin_work;
+	struct intel_flip_work *work = intel_crtc->flip_work;
 
-	intel_crtc->unpin_work = NULL;
+	intel_crtc->flip_work = NULL;
 
 	if (work->event)
 		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
@@ -3816,7 +3821,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
 	drm_crtc_vblank_put(&intel_crtc->base);
 
 	wake_up_all(&dev_priv->pending_flip_queue);
-	queue_work(dev_priv->wq, &work->work);
+	queue_work(dev_priv->wq, &work->unpin_work);
 
 	trace_i915_flip_complete(intel_crtc->plane,
 				 work->pending_flip_obj);
@@ -3840,9 +3845,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 
 	if (ret == 0) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		struct intel_flip_work *work;
 
 		spin_lock_irq(&dev->event_lock);
-		if (intel_crtc->unpin_work) {
+		work = intel_crtc->flip_work;
+		if (work && !is_mmio_work(work)) {
 			WARN_ONCE(1, "Removing stuck page flip\n");
 			page_flip_completed(intel_crtc);
 		}
@@ -6225,7 +6232,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 		return;
 
 	if (to_intel_plane_state(crtc->primary->state)->visible) {
-		WARN_ON(intel_crtc->unpin_work);
+		WARN_ON(intel_crtc->flip_work);
 
 		intel_pre_disable_primary_noatomic(crtc);
 
@@ -10821,15 +10828,16 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct intel_unpin_work *work;
+	struct intel_flip_work *work;
 
 	spin_lock_irq(&dev->event_lock);
-	work = intel_crtc->unpin_work;
-	intel_crtc->unpin_work = NULL;
+	work = intel_crtc->flip_work;
+	intel_crtc->flip_work = NULL;
 	spin_unlock_irq(&dev->event_lock);
 
 	if (work) {
-		cancel_work_sync(&work->work);
+		cancel_work_sync(&work->mmio_work);
+		cancel_work_sync(&work->unpin_work);
 		kfree(work);
 	}
 
@@ -10840,12 +10848,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
 
 static void intel_unpin_work_fn(struct work_struct *__work)
 {
-	struct intel_unpin_work *work =
-		container_of(__work, struct intel_unpin_work, work);
+	struct intel_flip_work *work =
+		container_of(__work, struct intel_flip_work, unpin_work);
 	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_plane *primary = crtc->base.primary;
 
+	if (is_mmio_work(work))
+		flush_work(&work->mmio_work);
+
 	mutex_lock(&dev->struct_mutex);
 	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
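
Two ordering details in the hunks above are easy to miss: intel_crtc_destroy()
cancels mmio_work before unpin_work, and intel_unpin_work_fn() flushes
mmio_work before unpinning. Both enforce the same rule; a condensed sketch
(the calls are from the patch, the comments are mine):

	/* completion path: the display may still scan out of the old
	 * framebuffer until the MMIO flip has landed, so the flip worker
	 * must finish before the old fb is unpinned */
	if (is_mmio_work(work))
		flush_work(&work->mmio_work);
	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);

	/* teardown path: kill the flip worker first, then the unpin worker
	 * that would otherwise wait for it, then free the work item */
	cancel_work_sync(&work->mmio_work);
	cancel_work_sync(&work->unpin_work);
	kfree(work);
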
@@ -10870,15 +10881,13 @@ static bool g4x_flip_count_after_eq(u32 a, u32 b)
 	return !((a - b) & 0x80000000);
 }
 
-static bool page_flip_finished(struct intel_crtc *crtc)
+static bool __pageflip_finished_cs(struct intel_crtc *crtc,
+				   struct intel_flip_work *work)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned reset_counter;
 
-	/* ensure that the unpin work is consistent wrt ->pending. */
-	smp_rmb();
-
 	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	if (crtc->reset_counter != reset_counter)
 		return true;
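
g4x_flip_count_after_eq(), kept as context here, is the wraparound-safe
"a is at or after b" test on the hardware flip counter: in u32 arithmetic
the difference a - b stays small when a is at or slightly past b, even
across a counter wrap, so the top bit of the difference distinguishes
"after or equal" from "before". A few illustrative values, not from the
patch:

	g4x_flip_count_after_eq(5, 3);			/* true:  5 - 3 = 2 */
	g4x_flip_count_after_eq(3, 5);			/* false: 3 - 5 = 0xfffffffe */
	g4x_flip_count_after_eq(2, 0xfffffffeU);	/* true: 2 is 4 past the wrap */
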
@@ -10915,17 +10924,74 @@ static bool page_flip_finished(struct intel_crtc *crtc)
 	 * anyway, we don't really care.
 	 */
 	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
-		crtc->unpin_work->gtt_offset &&
+		crtc->flip_work->gtt_offset &&
 		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
-				       crtc->unpin_work->flip_count);
+				       crtc->flip_work->flip_count);
+}
+
+static bool
+__pageflip_finished_mmio(struct intel_crtc *crtc,
+			 struct intel_flip_work *work)
+{
+	/*
+	 * MMIO work completes when vblank is different from
+	 * flip_queued_vblank.
+	 *
+	 * Reset counter value doesn't matter, this is handled by
+	 * i915_wait_request finishing early, so no need to handle
+	 * reset here.
+	 */
+	return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
+}
+
+
+static bool pageflip_finished(struct intel_crtc *crtc,
+			      struct intel_flip_work *work)
+{
+	if (!atomic_read(&work->pending))
+		return false;
+
+	smp_rmb();
+
+	if (is_mmio_work(work))
+		return __pageflip_finished_mmio(crtc, work);
+	else
+		return __pageflip_finished_cs(crtc, work);
+}
+
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_flip_work *work;
+	unsigned long flags;
+
+	/* Ignore early vblank irqs */
+	if (!crtc)
+		return;
+
+	/*
+	 * This is called both by irq handlers and the reset code (to complete
+	 * lost pageflips) so needs the full irqsave spinlocks.
+	 */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	work = intel_crtc->flip_work;
+
+	if (work != NULL &&
+	    !is_mmio_work(work) &&
+	    pageflip_finished(intel_crtc, work))
+		page_flip_completed(intel_crtc);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-void intel_finish_page_flip(struct drm_i915_private *dev_priv, int pipe)
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_unpin_work *work;
+	struct intel_flip_work *work;
 	unsigned long flags;
 
 	/* Ignore early vblank irqs */
@@ -10937,18 +11003,21 @@ void intel_finish_page_flip(struct drm_i915_private *dev_priv, int pipe)
 	 * lost pageflips) so needs the full irqsave spinlocks.
 	 */
 	spin_lock_irqsave(&dev->event_lock, flags);
-	work = intel_crtc->unpin_work;
+	work = intel_crtc->flip_work;
 
 	if (work != NULL &&
-	    atomic_read(&work->pending) &&
-	    page_flip_finished(intel_crtc))
+	    is_mmio_work(work) &&
+	    pageflip_finished(intel_crtc, work))
 		page_flip_completed(intel_crtc);
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
+static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
+					       struct intel_flip_work *work)
 {
+	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
+
 	/* Ensure that the work item is consistent when activating it ... */
 	smp_mb__before_atomic();
 	atomic_set(&work->pending, 1);
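
intel_mark_page_flip_active() and pageflip_finished() now form an explicit
publish/consume pair around ->pending, which is why the smp_rmb() moved out
of __pageflip_finished_cs() and into the common pageflip_finished(). A
condensed sketch of the pairing (the calls are from the patch, the comments
are mine):

	/* writer, e.g. intel_mark_page_flip_active() */
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
	smp_mb__before_atomic();	/* order the setup stores... */
	atomic_set(&work->pending, 1);	/* ...before publishing the work */

	/* reader, e.g. pageflip_finished() */
	if (!atomic_read(&work->pending))
		return false;		/* not published yet */
	smp_rmb();			/* pairs with the writer's barrier */
	/* work->flip_queued_vblank etc. are now safe to inspect */
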
@@ -10982,7 +11051,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
 	intel_ring_emit(engine, 0); /* aux display base address, unused */
 
 	return 0;
@@ -11013,7 +11082,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
 	intel_ring_emit(engine, MI_NOOP);
 
 	return 0;
@@ -11043,7 +11112,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
+	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
 			obj->tiling_mode);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
@@ -11077,7 +11146,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
-	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -11180,7 +11249,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 
 	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
 	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
-	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
 	intel_ring_emit(engine, (MI_NOOP));
 
 	return 0;
@@ -11219,7 +11288,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
 			     unsigned int rotation,
-			     struct intel_unpin_work *work)
+			     struct intel_flip_work *work)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11271,7 +11340,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
 }
 
 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
-			     struct intel_unpin_work *work)
+			     struct intel_flip_work *work)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11294,48 +11363,20 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
 	POSTING_READ(DSPSURF(intel_crtc->plane));
 }
 
-/*
- * XXX: This is the temporary way to update the plane registers until we get
- * around to using the usual plane update functions for MMIO flips
- */
-static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
-{
-	struct intel_crtc *crtc = mmio_flip->crtc;
-	struct intel_unpin_work *work;
-
-	spin_lock_irq(&crtc->base.dev->event_lock);
-	work = crtc->unpin_work;
-	spin_unlock_irq(&crtc->base.dev->event_lock);
-	if (work == NULL)
-		return;
-
-	intel_pipe_update_start(crtc);
-
-	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
-		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
-	else
-		/* use_mmio_flip() retricts MMIO flips to ilk+ */
-		ilk_do_mmio_flip(crtc, work);
-
-	intel_pipe_update_end(crtc);
-
-	intel_mark_page_flip_active(work);
-}
-
-static void intel_mmio_flip_work_func(struct work_struct *work)
+static void intel_mmio_flip_work_func(struct work_struct *w)
 {
-	struct intel_mmio_flip *mmio_flip =
-		container_of(work, struct intel_mmio_flip, work);
+	struct intel_flip_work *work =
+		container_of(w, struct intel_flip_work, mmio_work);
+	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_framebuffer *intel_fb =
-		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+		to_intel_framebuffer(crtc->base.primary->fb);
 	struct drm_i915_gem_object *obj = intel_fb->obj;
 
-	if (mmio_flip->req) {
-		WARN_ON(__i915_wait_request(mmio_flip->req,
+	if (work->flip_queued_req)
+		WARN_ON(__i915_wait_request(work->flip_queued_req,
 					    false, NULL,
-					    &mmio_flip->i915->rps.mmioflips));
-		i915_gem_request_unreference(mmio_flip->req);
-	}
+					    &dev_priv->rps.mmioflips));
 
 	/* For framebuffer backed by dmabuf, wait for fence */
 	if (obj->base.dma_buf)
@@ -11343,29 +11384,15 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 					    false, false,
 					    MAX_SCHEDULE_TIMEOUT) < 0);
 
-	intel_do_mmio_flip(mmio_flip);
-	kfree(mmio_flip);
-}
-
-static int intel_queue_mmio_flip(struct drm_device *dev,
-				 struct drm_crtc *crtc,
-				 struct drm_i915_gem_object *obj)
-{
-	struct intel_mmio_flip *mmio_flip;
-
-	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
-	if (mmio_flip == NULL)
-		return -ENOMEM;
-
-	mmio_flip->i915 = to_i915(dev);
-	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
-	mmio_flip->crtc = to_intel_crtc(crtc);
-	mmio_flip->rotation = crtc->primary->state->rotation;
+	intel_pipe_update_start(crtc);
 
-	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
-	schedule_work(&mmio_flip->work);
+	if (INTEL_GEN(dev_priv) >= 9)
+		skl_do_mmio_flip(crtc, work->rotation, work);
+	else
+		/* use_mmio_flip() restricts MMIO flips to ilk+ */
+		ilk_do_mmio_flip(crtc, work);
 
-	return 0;
+	intel_pipe_update_end(crtc, work);
 }
 
 static int intel_default_queue_flip(struct drm_device *dev,
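
The two hunks above fold intel_do_mmio_flip() and intel_queue_mmio_flip()
into intel_mmio_flip_work_func(): the per-flip kmalloc of a separate
intel_mmio_flip is gone because the work item, the request reference and the
rotation now live in intel_flip_work itself, so queueing an MMIO flip can no
longer fail with -ENOMEM. Roughly, as a sketch of the before/after shape:

	/* before: a second allocation that could fail */
	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
	if (mmio_flip == NULL)
		return -ENOMEM;
	schedule_work(&mmio_flip->work);

	/* after: reuse the already-allocated flip work */
	INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
	schedule_work(&work->mmio_work);	/* cannot fail */
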
@@ -11378,36 +11405,32 @@
 	return -ENODEV;
 }
 
-static bool __intel_pageflip_stall_check(struct drm_device *dev,
-					 struct drm_crtc *crtc)
+static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
+				      struct intel_crtc *intel_crtc,
+				      struct intel_flip_work *work)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_unpin_work *work = intel_crtc->unpin_work;
-	u32 addr;
-	u32 pending;
-
-	pending = atomic_read(&work->pending);
-	/* ensure that the unpin work is consistent wrt ->pending. */
-	smp_rmb();
+	u32 addr, vblank;
 
-	if (!pending)
+	if (!atomic_read(&work->pending))
 		return false;
 
+	smp_rmb();
+
+	vblank = intel_crtc_get_vblank_counter(intel_crtc);
 	if (work->flip_ready_vblank == 0) {
 		if (work->flip_queued_req &&
 		    !i915_gem_request_completed(work->flip_queued_req, true))
 			return false;
 
-		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
+		work->flip_ready_vblank = vblank;
 	}
 
-	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
+	if (vblank - work->flip_ready_vblank < 3)
 		return false;
 
 	/* Potential stall - if we see that the flip has happened,
 	 * assume a missed interrupt. */
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (INTEL_GEN(dev_priv) >= 4)
 		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
 	else
 		addr = I915_READ(DSPADDR(intel_crtc->plane));
@@ -11424,7 +11447,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_unpin_work *work;
+	struct intel_flip_work *work;
 
 	WARN_ON(!in_interrupt());
 
@@ -11432,15 +11455,19 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
 		return;
 
 	spin_lock(&dev->event_lock);
-	work = intel_crtc->unpin_work;
-	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
-		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
-			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+	work = intel_crtc->flip_work;
+
+	if (work != NULL && !is_mmio_work(work) &&
+	    __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+		WARN_ONCE(1,
+			  "Kicking stuck page flip: queued at %d, now %d\n",
+			  work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
 		page_flip_completed(intel_crtc);
 		work = NULL;
 	}
-	if (work != NULL &&
-	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
+
+	if (work != NULL && !is_mmio_work(work) &&
+	    intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
 		intel_queue_rps_boost_for_request(work->flip_queued_req);
 	spin_unlock(&dev->event_lock);
 }
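
__pageflip_stall_check_cs() now samples the vblank counter once via
intel_crtc_get_vblank_counter() instead of calling drm_crtc_vblank_count()
repeatedly, and the "stuck" heuristic stays wraparound-safe because it uses
u32 subtraction: a CS flip is suspect once three vblanks pass without a
flip-done interrupt. Illustrative values, not from the patch:

	u32 ready = 0xfffffffe, now = 0x00000003;

	now - ready;		/* 5: correctly counts across the wrap */
	(now - ready) < 3;	/* false -> potential stall, go poll DSPSURF */
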
@@ -11457,7 +11484,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_plane *primary = crtc->primary;
 	enum pipe pipe = intel_crtc->pipe;
-	struct intel_unpin_work *work;
+	struct intel_flip_work *work;
 	struct intel_engine_cs *engine;
 	bool mmio_flip;
 	struct drm_i915_gem_request *request = NULL;
@@ -11494,19 +11521,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->event = event;
 	work->crtc = crtc;
 	work->old_fb = old_fb;
-	INIT_WORK(&work->work, intel_unpin_work_fn);
+	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
 
 	ret = drm_crtc_vblank_get(crtc);
 	if (ret)
 		goto free_work;
 
-	/* We borrow the event spin lock for protecting unpin_work */
+	/* We borrow the event spin lock for protecting flip_work */
 	spin_lock_irq(&dev->event_lock);
-	if (intel_crtc->unpin_work) {
+	if (intel_crtc->flip_work) {
 		/* Before declaring the flip queue wedged, check if
 		 * the hardware completed the operation behind our backs.
 		 */
-		if (__intel_pageflip_stall_check(dev, crtc)) {
+		if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
 			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
 			page_flip_completed(intel_crtc);
 		} else {
@@ -11518,7 +11545,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 			return -EBUSY;
 		}
 	}
-	intel_crtc->unpin_work = work;
+	intel_crtc->flip_work = work;
 	spin_unlock_irq(&dev->event_lock);
 
 	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -11589,26 +11616,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
 						  obj, 0);
 	work->gtt_offset += intel_crtc->dspaddr_offset;
+	work->rotation = crtc->primary->state->rotation;
 
 	if (mmio_flip) {
-		work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
+		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
 		i915_gem_request_assign(&work->flip_queued_req,
 					obj->last_write_req);
 
-		ret = intel_queue_mmio_flip(dev, crtc, obj);
-		if (ret)
-			goto cleanup_unpin;
+		schedule_work(&work->mmio_work);
 	} else {
+		i915_gem_request_assign(&work->flip_queued_req, request);
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
 						   page_flip_flags);
 		if (ret)
 			goto cleanup_unpin;
 
-		i915_gem_request_assign(&work->flip_queued_req, request);
-
-		work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
-		intel_mark_page_flip_active(work);
+		intel_mark_page_flip_active(intel_crtc, work);
 
 		i915_add_request_no_flush(request);
 	}
@@ -11639,7 +11663,7 @@ cleanup:
 	drm_framebuffer_unreference(work->old_fb);
 
 	spin_lock_irq(&dev->event_lock);
-	intel_crtc->unpin_work = NULL;
+	intel_crtc->flip_work = NULL;
 	spin_unlock_irq(&dev->event_lock);
 
 	drm_crtc_vblank_put(crtc);
@@ -13935,7 +13959,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	intel_pipe_update_end(intel_crtc);
+	intel_pipe_update_end(intel_crtc, NULL);
 }
 
 /**
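
The last hunk passes NULL for the new second argument of
intel_pipe_update_end(). The updated prototype is not part of this excerpt;
presumably (an assumption, the change lives elsewhere in the series) it is:

	/* assumed prototype, not shown in these hunks */
	void intel_pipe_update_end(struct intel_crtc *crtc,
				   struct intel_flip_work *work);

The MMIO flip worker passes its intel_flip_work so completion can be tied to
the vblank evasion window, while the atomic commit path has no flip work and
passes NULL.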