@@ -73,8 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
 	DRM_FORMAT_ARGB8888,
 };
 
-static void intel_increase_pllclock(struct drm_device *dev,
-				    enum pipe pipe);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -889,60 +887,6 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
 	return intel_crtc->config.cpu_transcoder;
 }
 
-static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
-
-	frame = I915_READ(frame_reg);
-
-	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
-		WARN(1, "vblank wait on pipe %c timed out\n",
-		     pipe_name(pipe));
-}
-
-/**
- * intel_wait_for_vblank - wait for vblank on a given pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * Wait for vblank to occur on a given pipe. Needed for various bits of
- * mode setting code.
- */
-void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipestat_reg = PIPESTAT(pipe);
-
-	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-		g4x_wait_for_vblank(dev, pipe);
-		return;
-	}
-
-	/* Clear existing vblank status. Note this will clear any other
-	 * sticky status fields as well.
-	 *
-	 * This races with i915_driver_irq_handler() with the result
-	 * that either function could miss a vblank event. Here it is not
-	 * fatal, as we will either wait upon the next vblank interrupt or
-	 * timeout. Generally speaking intel_wait_for_vblank() is only
-	 * called during modeset at which time the GPU should be idle and
-	 * should *not* be performing page flips and thus not waiting on
-	 * vblanks...
-	 * Currently, the result of us stealing a vblank from the irq
-	 * handler is that a single frame will be skipped during swapbuffers.
-	 */
-	I915_WRITE(pipestat_reg,
-		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
-
-	/* Wait for vblank interrupt bit to set */
-	if (wait_for(I915_READ(pipestat_reg) &
-		     PIPE_VBLANK_INTERRUPT_STATUS,
-		     50))
-		DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
-			      pipe_name(pipe));
-}
-
 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1189,8 +1133,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 	     state_string(state), state_string(cur_state));
 }
 
-static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
-				  enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+			   enum pipe pipe)
 {
 	struct drm_device *dev = dev_priv->dev;
 	int pp_reg;
@@ -1263,7 +1207,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
 	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
 		state = true;
 
-	if (!intel_display_power_enabled(dev_priv,
+	if (!intel_display_power_is_enabled(dev_priv,
 				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
 		cur_state = false;
 	} else {
@@ -1332,7 +1276,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
 	int reg, sprite;
 	u32 val;
 
-	if (IS_VALLEYVIEW(dev)) {
+	if (INTEL_INFO(dev)->gen >= 9) {
+		for_each_sprite(pipe, sprite) {
+			val = I915_READ(PLANE_CTL(pipe, sprite));
+			WARN(val & PLANE_CTL_ENABLE,
+			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
+			     sprite, pipe_name(pipe));
+		}
+	} else if (IS_VALLEYVIEW(dev)) {
 		for_each_sprite(pipe, sprite) {
 			reg = SPCNTR(pipe, sprite);
 			val = I915_READ(reg);
@@ -2233,7 +2184,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 
 	switch (obj->tiling_mode) {
 	case I915_TILING_NONE:
-		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+		if (INTEL_INFO(dev)->gen >= 9)
+			alignment = 256 * 1024;
+		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
 			alignment = 128 * 1024;
 		else if (INTEL_INFO(dev)->gen >= 4)
 			alignment = 4 * 1024;
@@ -2241,8 +2194,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 			alignment = 64 * 1024;
 		break;
 	case I915_TILING_X:
-		/* pin() will align the object as required by fence */
-		alignment = 0;
+		if (INTEL_INFO(dev)->gen >= 9)
+			alignment = 256 * 1024;
+		else {
+			/* pin() will align the object as required by fence */
+			alignment = 0;
+		}
 		break;
 	case I915_TILING_Y:
 		WARN(1, "Y tiled bo slipped through, driver bug!\n");
@@ -2672,6 +2629,90 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 	POSTING_READ(reg);
 }
 
+static void skylake_update_primary_plane(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_framebuffer *intel_fb;
+	struct drm_i915_gem_object *obj;
+	int pipe = intel_crtc->pipe;
+	u32 plane_ctl, stride;
+
+	if (!intel_crtc->primary_enabled) {
+		I915_WRITE(PLANE_CTL(pipe, 0), 0);
+		I915_WRITE(PLANE_SURF(pipe, 0), 0);
+		POSTING_READ(PLANE_CTL(pipe, 0));
+		return;
+	}
+
+	plane_ctl = PLANE_CTL_ENABLE |
+		    PLANE_CTL_PIPE_GAMMA_ENABLE |
+		    PLANE_CTL_PIPE_CSC_ENABLE;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+		plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		plane_ctl |= PLANE_CTL_ORDER_RGBX;
+		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+		plane_ctl |= PLANE_CTL_ORDER_RGBX;
+		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+		break;
+	default:
+		BUG();
+	}
+
+	intel_fb = to_intel_framebuffer(fb);
+	obj = intel_fb->obj;
+
+	/*
+	 * The stride is either expressed as a multiple of 64 bytes chunks for
+	 * linear buffers or in number of tiles for tiled buffers.
+	 */
+	switch (obj->tiling_mode) {
+	case I915_TILING_NONE:
+		stride = fb->pitches[0] >> 6;
+		break;
+	case I915_TILING_X:
+		plane_ctl |= PLANE_CTL_TILED_X;
+		stride = fb->pitches[0] >> 9;
+		break;
+	default:
+		BUG();
+	}
+
+	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+
+	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
+
+	DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
+		      i915_gem_obj_ggtt_offset(obj),
+		      x, y, fb->width, fb->height,
+		      fb->pitches[0]);
+
+	I915_WRITE(PLANE_POS(pipe, 0), 0);
+	I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
+	I915_WRITE(PLANE_SIZE(pipe, 0),
+		   (intel_crtc->config.pipe_src_h - 1) << 16 |
+		   (intel_crtc->config.pipe_src_w - 1));
+	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+	I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+
+	POSTING_READ(PLANE_SURF(pipe, 0));
+}
+
 /* Assume fb object is pinned & idle & fenced and just update base pointers */
 static int
 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2682,7 +2723,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	if (dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);
-	intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -2762,20 +2802,58 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	unsigned long flags;
 	bool pending;
 
 	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
 	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
 		return false;
 
-	spin_lock_irqsave(&dev->event_lock, flags);
+	spin_lock_irq(&dev->event_lock);
 	pending = to_intel_crtc(crtc)->unpin_work != NULL;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+	spin_unlock_irq(&dev->event_lock);
 
 	return pending;
 }
 
+static void intel_update_pipe_size(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_display_mode *adjusted_mode;
+
+	if (!i915.fastboot)
+		return;
+
+	/*
+	 * Update pipe size and adjust fitter if needed: the reason for this is
+	 * that in compute_mode_changes we check the native mode (not the pfit
+	 * mode) to see if we can flip rather than do a full mode set. In the
+	 * fastboot case, we'll flip, but if we don't update the pipesrc and
+	 * pfit state, we'll end up with a big fb scanned out into the wrong
+	 * sized surface.
+	 *
+	 * To fix this properly, we need to hoist the checks up into
+	 * compute_mode_changes (or above), check the actual pfit state and
+	 * whether the platform allows pfit disable with pipe active, and only
+	 * then update the pipesrc and pfit state, even on the flip path.
+	 */
+
+	adjusted_mode = &crtc->config.adjusted_mode;
+
+	I915_WRITE(PIPESRC(crtc->pipe),
+		   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+		   (adjusted_mode->crtc_vdisplay - 1));
+	if (!crtc->config.pch_pfit.enabled &&
+	    (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) ||
+	     intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))) {
+		I915_WRITE(PF_CTL(crtc->pipe), 0);
+		I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
+		I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
+	}
+	crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+	crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
+}
+
 static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		    struct drm_framebuffer *fb)
@@ -2818,36 +2896,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		return ret;
 	}
 
-	/*
-	 * Update pipe size and adjust fitter if needed: the reason for this is
-	 * that in compute_mode_changes we check the native mode (not the pfit
-	 * mode) to see if we can flip rather than do a full mode set. In the
-	 * fastboot case, we'll flip, but if we don't update the pipesrc and
-	 * pfit state, we'll end up with a big fb scanned out into the wrong
-	 * sized surface.
-	 *
-	 * To fix this properly, we need to hoist the checks up into
-	 * compute_mode_changes (or above), check the actual pfit state and
-	 * whether the platform allows pfit disable with pipe active, and only
-	 * then update the pipesrc and pfit state, even on the flip path.
-	 */
-	if (i915.fastboot) {
-		const struct drm_display_mode *adjusted_mode =
-			&intel_crtc->config.adjusted_mode;
-
-		I915_WRITE(PIPESRC(intel_crtc->pipe),
-			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
-			   (adjusted_mode->crtc_vdisplay - 1));
-		if (!intel_crtc->config.pch_pfit.enabled &&
-		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
-		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
-			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
-			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
-			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
-		}
-		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
-		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
-	}
+	intel_update_pipe_size(intel_crtc);
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -3472,14 +3521,13 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 				       !intel_crtc_has_pending_flip(crtc),
 				       60*HZ) == 0)) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		unsigned long flags;
 
-		spin_lock_irqsave(&dev->event_lock, flags);
+		spin_lock_irq(&dev->event_lock);
 		if (intel_crtc->unpin_work) {
 			WARN_ONCE(1, "Removing stuck page flip\n");
 			page_flip_completed(intel_crtc);
 		}
-		spin_unlock_irqrestore(&dev->event_lock, flags);
+		spin_unlock_irq(&dev->event_lock);
 	}
 
 	if (crtc->primary->fb) {
@@ -4038,10 +4086,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 
-	assert_vblank_disabled(crtc);
-
-	drm_vblank_on(dev, pipe);
-
 	intel_enable_primary_hw_plane(crtc->primary, crtc);
 	intel_enable_planes(crtc);
 	intel_crtc_update_cursor(crtc, true);
@@ -4087,10 +4131,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
 	 * consider this a flip to a NULL plane.
 	 */
 	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
-
-	drm_vblank_off(dev, pipe);
-
-	assert_vblank_disabled(crtc);
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4160,6 +4200,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	if (HAS_PCH_CPT(dev))
 		cpt_verify_modeset(dev, intel_crtc->pipe);
 
+	assert_vblank_disabled(crtc);
+	drm_crtc_vblank_on(crtc);
+
 	intel_crtc_enable_planes(crtc);
 }
 
@@ -4272,6 +4315,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 		intel_opregion_notify_encoder(encoder, true);
 	}
 
+	assert_vblank_disabled(crtc);
+	drm_crtc_vblank_on(crtc);
+
 	/* If we change the relative order between pipe/planes enabling, we need
 	 * to change the workaround. */
 	haswell_mode_set_planes_workaround(intel_crtc);
@@ -4307,6 +4353,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 
 	intel_crtc_disable_planes(crtc);
 
+	drm_crtc_vblank_off(crtc);
+	assert_vblank_disabled(crtc);
+
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->disable(encoder);
 
@@ -4369,6 +4418,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
 	intel_crtc_disable_planes(crtc);
 
+	drm_crtc_vblank_off(crtc);
+	assert_vblank_disabled(crtc);
+
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		intel_opregion_notify_encoder(encoder, false);
 		encoder->disable(encoder);
@@ -4510,20 +4562,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
 	return mask;
 }
 
-void intel_display_set_init_power(struct drm_i915_private *dev_priv,
-				  bool enable)
-{
-	if (dev_priv->power_domains.init_power_on == enable)
-		return;
-
-	if (enable)
-		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-	else
-		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
-	dev_priv->power_domains.init_power_on = enable;
-}
-
 static void modeset_update_crtc_power_domains(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4835,6 +4873,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->enable(encoder);
 
+	assert_vblank_disabled(crtc);
+	drm_crtc_vblank_on(crtc);
+
 	intel_crtc_enable_planes(crtc);
 
 	/* Underruns don't raise interrupts, so check manually. */
@@ -4892,6 +4933,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->enable(encoder);
 
+	assert_vblank_disabled(crtc);
+	drm_crtc_vblank_on(crtc);
+
 	intel_crtc_enable_planes(crtc);
 
 	/*
@@ -4955,9 +4999,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 		intel_set_memory_cxsr(dev_priv, false);
 	intel_crtc_disable_planes(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->disable(encoder);
-
 	/*
 	 * On gen2 planes are double buffered but the pipe isn't, so we must
 	 * wait for planes to fully turn off before disabling the pipe.
@@ -4966,6 +5007,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	 */
 	intel_wait_for_vblank(dev, pipe);
 
+	drm_crtc_vblank_off(crtc);
+	assert_vblank_disabled(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
 	intel_disable_pipe(intel_crtc);
 
 	i9xx_pfit_disable(intel_crtc);
@@ -6434,8 +6481,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
-	if (!intel_display_power_enabled(dev_priv,
-					 POWER_DOMAIN_PIPE(crtc->pipe)))
+	if (!intel_display_power_is_enabled(dev_priv,
+					    POWER_DOMAIN_PIPE(crtc->pipe)))
 		return false;
 
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7021,7 +7068,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
 	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
 	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
 
-	if (IS_BROADWELL(dev)) {
+	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
 		val = 0;
 
 		switch (intel_crtc->config.pipe_bpp) {
@@ -7444,8 +7491,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
-	if (!intel_display_power_enabled(dev_priv,
-					 POWER_DOMAIN_PIPE(crtc->pipe)))
+	if (!intel_display_power_is_enabled(dev_priv,
+					    POWER_DOMAIN_PIPE(crtc->pipe)))
 		return false;
 
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7638,7 +7685,6 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 {
 	uint32_t val;
-	unsigned long irqflags;
 
 	val = I915_READ(LCPLL_CTL);
 
@@ -7658,10 +7704,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 	 * to call special forcewake code that doesn't touch runtime PM and
 	 * doesn't enable the forcewake delayed work.
 	 */
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irq(&dev_priv->uncore.lock);
 	if (dev_priv->uncore.forcewake_count++ == 0)
 		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irq(&dev_priv->uncore.lock);
 
 	if (val & LCPLL_POWER_DOWN_ALLOW) {
 		val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7692,10 +7738,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 	}
 
 	/* See the big comment above. */
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irq(&dev_priv->uncore.lock);
 	if (--dev_priv->uncore.forcewake_count == 0)
 		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
 /*
@@ -7824,7 +7870,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
 	 * the PCH transcoder is on.
 	 */
-	if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+	if (INTEL_INFO(dev)->gen < 9 &&
+	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
 		pipe_config->has_pch_encoder = true;
 
 		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@ -7843,7 +7890,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	enum intel_display_power_domain pfit_domain;
 	uint32_t tmp;
 
-	if (!intel_display_power_enabled(dev_priv,
+	if (!intel_display_power_is_enabled(dev_priv,
 					 POWER_DOMAIN_PIPE(crtc->pipe)))
 		return false;
 
@@ -7872,7 +7919,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 		pipe_config->cpu_transcoder = TRANSCODER_EDP;
 	}
 
-	if (!intel_display_power_enabled(dev_priv,
+	if (!intel_display_power_is_enabled(dev_priv,
 			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
 		return false;
 
@@ -7885,7 +7932,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	intel_get_pipe_timings(crtc, pipe_config);
 
 	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-	if (intel_display_power_enabled(dev_priv, pfit_domain))
+	if (intel_display_power_is_enabled(dev_priv, pfit_domain))
 		ironlake_get_pfit_config(crtc, pipe_config);
 
 	if (IS_HASWELL(dev))
@@ -8255,8 +8302,10 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
 		intel_crtc->cursor_cntl = 0;
 	}
 
-	if (intel_crtc->cursor_base != base)
+	if (intel_crtc->cursor_base != base) {
 		I915_WRITE(_CURABASE, base);
+		intel_crtc->cursor_base = base;
+	}
 
 	if (intel_crtc->cursor_size != size) {
 		I915_WRITE(CURSIZE, size);
@@ -8296,9 +8345,10 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 			return;
 		}
 		cntl |= pipe << 28; /* Connect to correct pipe */
+
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+			cntl |= CURSOR_PIPE_CSC_ENABLE;
 	}
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		cntl |= CURSOR_PIPE_CSC_ENABLE;
 
 	if (intel_crtc->cursor_cntl != cntl) {
 		I915_WRITE(CURCNTR(pipe), cntl);
@@ -8309,6 +8359,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 	/* and commit changes on next vblank */
 	I915_WRITE(CURBASE(pipe), base);
 	POSTING_READ(CURBASE(pipe));
+
+	intel_crtc->cursor_base = base;
 }
 
 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -8359,7 +8411,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 		i845_update_cursor(crtc, base);
 	else
 		i9xx_update_cursor(crtc, base);
-	intel_crtc->cursor_base = base;
 }
 
 static bool cursor_size_ok(struct drm_device *dev,
@@ -9023,35 +9074,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	return mode;
 }
 
-static void intel_increase_pllclock(struct drm_device *dev,
-				    enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int dpll_reg = DPLL(pipe);
-	int dpll;
-
-	if (!HAS_GMCH_DISPLAY(dev))
-		return;
-
-	if (!dev_priv->lvds_downclock_avail)
-		return;
-
-	dpll = I915_READ(dpll_reg);
-	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-		DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
-		assert_panel_unlocked(dev_priv, pipe);
-
-		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
-		I915_WRITE(dpll_reg, dpll);
-		intel_wait_for_vblank(dev, pipe);
-
-		dpll = I915_READ(dpll_reg);
-		if (dpll & DISPLAY_RATE_SELECT_FPA1)
-			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-	}
-}
-
 static void intel_decrease_pllclock(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -9127,199 +9149,16 @@ out:
 	intel_runtime_pm_put(dev_priv);
 }
 
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
-			       unsigned frontbuffer_bits,
-			       struct intel_engine_cs *ring)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum pipe pipe;
-
-	if (!i915.powersave)
-		return;
-
-	for_each_pipe(dev_priv, pipe) {
-		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
-			continue;
-
-		intel_increase_pllclock(dev, pipe);
-		if (ring && intel_fbc_enabled(dev))
-			ring->fbc_dirty = true;
-	}
-}
-
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	if (!obj->frontbuffer_bits)
-		return;
-
-	if (ring) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits
-			|= obj->frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits
-			&= ~obj->frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
-	}
-
-	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
-	intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called every time rendering on the given planes has
- * completed and frontbuffer caching can be started again. Flushes will get
- * delayed if they're blocked by some oustanding asynchronous rendering.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flush(struct drm_device *dev,
-			     unsigned frontbuffer_bits)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* Delay flushing when rings are still busy.*/
-	mutex_lock(&dev_priv->fb_tracking.lock);
-	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
-
-	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
-
-	intel_edp_psr_flush(dev, frontbuffer_bits);
-
-	/*
-	 * FIXME: Unconditional fbc flushing here is a rather gross hack and
-	 * needs to be reworked into a proper frontbuffer tracking scheme like
-	 * psr employs.
-	 */
-	if (IS_BROADWELL(dev))
-		gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
-}
-
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned frontbuffer_bits;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	if (!obj->frontbuffer_bits)
-		return;
-
-	frontbuffer_bits = obj->frontbuffer_bits;
-
-	if (retire) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
-		/* Filter out new bits since rendering started. */
-		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
-		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
-	}
-
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flip_prepare - prepare asnychronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. The actual
- * frontbuffer flushing will be delayed until completion is signalled with
- * intel_frontbuffer_flip_complete. If an invalidate happens in between this
- * flush will be cancelled.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
-				    unsigned frontbuffer_bits)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	mutex_lock(&dev_priv->fb_tracking.lock);
-	dev_priv->fb_tracking.flip_bits
-		|= frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
-}
-
-/**
- * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after the flip has been latched and will complete
- * on the next vblank. It will execute the fush if it hasn't been cancalled yet.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
-				     unsigned frontbuffer_bits)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	mutex_lock(&dev_priv->fb_tracking.lock);
-	/* Mask any cancelled flips. */
-	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
-	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
-
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct intel_unpin_work *work;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev->event_lock, flags);
+	spin_lock_irq(&dev->event_lock);
 	work = intel_crtc->unpin_work;
 	intel_crtc->unpin_work = NULL;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+	spin_unlock_irq(&dev->event_lock);
 
 	if (work) {
 		cancel_work_sync(&work->work);
@@ -9365,6 +9204,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 	if (intel_crtc == NULL)
 		return;
 
+	/*
+	 * This is called both by irq handlers and the reset code (to complete
+	 * lost pageflips) so needs the full irqsave spinlocks.
+	 */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 
@@ -9446,7 +9289,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
 	unsigned long flags;
 
-	/* NB: An MMIO update of the plane base pointer will also
+
+	/*
+	 * This is called both by irq handlers and the reset code (to complete
+	 * lost pageflips) so needs the full irqsave spinlocks.
+	 *
+	 * NB: An MMIO update of the plane base pointer will also
 	 * generate a page-flip completion irq, i.e. every modeset
 	 * is also accompanied by a spurious intel_prepare_page_flip().
 	 */
@@ -9821,7 +9669,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	unsigned long irq_flags;
 	int ret;
 
 	if (WARN_ON(intel_crtc->mmio_flip.seqno))
@@ -9835,10 +9682,10 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 		return 0;
 	}
 
-	spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+	spin_lock_irq(&dev_priv->mmio_flip_lock);
 	intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
 	intel_crtc->mmio_flip.ring_id = obj->ring->id;
-	spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+	spin_unlock_irq(&dev_priv->mmio_flip_lock);
 
 	/*
 	 * Double check to catch cases where irq fired before
@@ -9903,18 +9750,19 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	unsigned long flags;
+
+	WARN_ON(!in_irq());
 
 	if (crtc == NULL)
 		return;
 
-	spin_lock_irqsave(&dev->event_lock, flags);
+	spin_lock(&dev->event_lock);
 	if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
 		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
 			 intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
 		page_flip_completed(intel_crtc);
 	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+	spin_unlock(&dev->event_lock);
 }
 
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
@@ -9930,7 +9778,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	enum pipe pipe = intel_crtc->pipe;
 	struct intel_unpin_work *work;
 	struct intel_engine_cs *ring;
-	unsigned long flags;
 	int ret;
 
 	/*
@@ -9971,7 +9818,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		goto free_work;
 
 	/* We borrow the event spin lock for protecting unpin_work */
-	spin_lock_irqsave(&dev->event_lock, flags);
+	spin_lock_irq(&dev->event_lock);
 	if (intel_crtc->unpin_work) {
 		/* Before declaring the flip queue wedged, check if
 		 * the hardware completed the operation behind our backs.
@@ -9981,7 +9828,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 			page_flip_completed(intel_crtc);
 		} else {
 			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-			spin_unlock_irqrestore(&dev->event_lock, flags);
+			spin_unlock_irq(&dev->event_lock);
 
 			drm_crtc_vblank_put(crtc);
 			kfree(work);
@@ -9989,7 +9836,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		}
 	}
 	intel_crtc->unpin_work = work;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+	spin_unlock_irq(&dev->event_lock);
 
 	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
 		flush_workqueue(dev_priv->wq);
@@ -10076,9 +9923,9 @@ cleanup_pending:
 	mutex_unlock(&dev->struct_mutex);
 
 cleanup:
-	spin_lock_irqsave(&dev->event_lock, flags);
+	spin_lock_irq(&dev->event_lock);
 	intel_crtc->unpin_work = NULL;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+	spin_unlock_irq(&dev->event_lock);
 
 	drm_crtc_vblank_put(crtc);
 free_work:
@@ -10089,9 +9936,9 @@ out_hang:
 		intel_crtc_wait_for_pending_flips(crtc);
 		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
 		if (ret == 0 && event) {
-			spin_lock_irqsave(&dev->event_lock, flags);
+			spin_lock_irq(&dev->event_lock);
 			drm_send_vblank_event(dev, pipe, event);
-			spin_unlock_irqrestore(&dev->event_lock, flags);
+			spin_unlock_irq(&dev->event_lock);
 		}
 	}
 	return ret;
@@ -11677,7 +11524,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
|
|
{
|
|
{
|
|
uint32_t val;
|
|
uint32_t val;
|
|
|
|
|
|
- if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
|
|
|
|
|
|
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
|
|
return false;
|
|
return false;
|
|
|
|
|
|
val = I915_READ(PCH_DPLL(pll->id));
|
|
val = I915_READ(PCH_DPLL(pll->id));
|
|
@@ -11811,89 +11658,37 @@ disable_unpin:
|
|
}
|
|
}
|
|
|
|
|
|
static int
|
|
static int
|
|
-intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|
|
|
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
|
|
|
|
- unsigned int crtc_w, unsigned int crtc_h,
|
|
|
|
- uint32_t src_x, uint32_t src_y,
|
|
|
|
- uint32_t src_w, uint32_t src_h)
|
|
|
|
|
|
+intel_check_primary_plane(struct drm_plane *plane,
|
|
|
|
+ struct intel_plane_state *state)
|
|
|
|
+{
|
|
|
|
+ struct drm_crtc *crtc = state->crtc;
|
|
|
|
+ struct drm_framebuffer *fb = state->fb;
|
|
|
|
+ struct drm_rect *dest = &state->dst;
|
|
|
|
+ struct drm_rect *src = &state->src;
|
|
|
|
+ const struct drm_rect *clip = &state->clip;
|
|
|
|
+
|
|
|
|
+ return drm_plane_helper_check_update(plane, crtc, fb,
|
|
|
|
+ src, dest, clip,
|
|
|
|
+ DRM_PLANE_HELPER_NO_SCALING,
|
|
|
|
+ DRM_PLANE_HELPER_NO_SCALING,
|
|
|
|
+ false, true, &state->visible);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static int
|
|
|
|
+intel_commit_primary_plane(struct drm_plane *plane,
|
|
|
|
+ struct intel_plane_state *state)
|
|
{
|
|
{
|
|
|
|
+ struct drm_crtc *crtc = state->crtc;
|
|
|
|
+ struct drm_framebuffer *fb = state->fb;
|
|
struct drm_device *dev = crtc->dev;
|
|
struct drm_device *dev = crtc->dev;
|
|
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
|
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
|
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
|
|
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
|
|
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
|
|
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
|
|
- struct drm_rect dest = {
|
|
|
|
- /* integer pixels */
|
|
|
|
- .x1 = crtc_x,
|
|
|
|
- .y1 = crtc_y,
|
|
|
|
- .x2 = crtc_x + crtc_w,
|
|
|
|
- .y2 = crtc_y + crtc_h,
|
|
|
|
- };
|
|
|
|
- struct drm_rect src = {
|
|
|
|
- /* 16.16 fixed point */
|
|
|
|
- .x1 = src_x,
|
|
|
|
- .y1 = src_y,
|
|
|
|
- .x2 = src_x + src_w,
|
|
|
|
- .y2 = src_y + src_h,
|
|
|
|
- };
|
|
|
|
- const struct drm_rect clip = {
|
|
|
|
- /* integer pixels */
|
|
|
|
- .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
|
|
|
|
- .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
|
|
|
|
- };
|
|
|
|
- const struct {
|
|
|
|
- int crtc_x, crtc_y;
|
|
|
|
- unsigned int crtc_w, crtc_h;
|
|
|
|
- uint32_t src_x, src_y, src_w, src_h;
|
|
|
|
- } orig = {
|
|
|
|
- .crtc_x = crtc_x,
|
|
|
|
- .crtc_y = crtc_y,
|
|
|
|
- .crtc_w = crtc_w,
|
|
|
|
- .crtc_h = crtc_h,
|
|
|
|
- .src_x = src_x,
|
|
|
|
- .src_y = src_y,
|
|
|
|
- .src_w = src_w,
|
|
|
|
- .src_h = src_h,
|
|
|
|
- };
|
|
|
|
struct intel_plane *intel_plane = to_intel_plane(plane);
|
|
struct intel_plane *intel_plane = to_intel_plane(plane);
|
|
- bool visible;
|
|
|
|
|
|
+ struct drm_rect *src = &state->src;
|
|
int ret;
|
|
int ret;
|
|
|
|
|
|
- ret = drm_plane_helper_check_update(plane, crtc, fb,
|
|
|
|
- &src, &dest, &clip,
|
|
|
|
- DRM_PLANE_HELPER_NO_SCALING,
|
|
|
|
- DRM_PLANE_HELPER_NO_SCALING,
|
|
|
|
- false, true, &visible);
|
|
|
|
-
|
|
|
|
- if (ret)
|
|
|
|
- return ret;
|
|
|
|
-
|
|
|
|
- /*
|
|
|
|
- * If the CRTC isn't enabled, we're just pinning the framebuffer,
|
|
|
|
- * updating the fb pointer, and returning without touching the
|
|
|
|
- * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
|
|
|
|
- * turn on the display with all planes setup as desired.
|
|
|
|
- */
|
|
|
|
- if (!crtc->enabled) {
|
|
|
|
- mutex_lock(&dev->struct_mutex);
|
|
|
|
-
|
|
|
|
- /*
|
|
|
|
- * If we already called setplane while the crtc was disabled,
|
|
|
|
- * we may have an fb pinned; unpin it.
|
|
|
|
- */
|
|
|
|
- if (plane->fb)
|
|
|
|
- intel_unpin_fb_obj(old_obj);
|
|
|
|
-
|
|
|
|
- i915_gem_track_fb(old_obj, obj,
|
|
|
|
- INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
|
|
|
|
-
|
|
|
|
- /* Pin and return without programming hardware */
|
|
|
|
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
|
|
|
|
- mutex_unlock(&dev->struct_mutex);
|
|
|
|
-
|
|
|
|
- return ret;
|
|
|
|
- }
|
|
|
|
-
|
|
|
|
intel_crtc_wait_for_pending_flips(crtc);
|
|
intel_crtc_wait_for_pending_flips(crtc);
|
|
|
|
|
|
/*
|
|
/*
|
|
@@ -11902,7 +11697,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|
* happens if userspace explicitly disables the plane by passing fb=0
|
|
* happens if userspace explicitly disables the plane by passing fb=0
|
|
* because plane->fb still gets set and pinned.
|
|
* because plane->fb still gets set and pinned.
|
|
*/
|
|
*/
|
|
- if (!visible) {
|
|
|
|
|
|
+ if (!state->visible) {
|
|
mutex_lock(&dev->struct_mutex);
|
|
mutex_lock(&dev->struct_mutex);
|
|
|
|
|
|
/*
|
|
/*
|
|
@@ -11949,7 +11744,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|
intel_disable_fbc(dev);
|
|
intel_disable_fbc(dev);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
- ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
|
|
|
|
|
|
+ ret = intel_pipe_set_base(crtc, src->x1, src->y1, fb);
|
|
if (ret)
|
|
if (ret)
|
|
return ret;
|
|
return ret;
|
|
|
|
|
|
@@ -11957,19 +11752,62 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|
intel_enable_primary_hw_plane(plane, crtc);
|
|
intel_enable_primary_hw_plane(plane, crtc);
|
|
}
|
|
}
|
|
|
|
|
|
- intel_plane->crtc_x = orig.crtc_x;
|
|
|
|
- intel_plane->crtc_y = orig.crtc_y;
|
|
|
|
- intel_plane->crtc_w = orig.crtc_w;
|
|
|
|
- intel_plane->crtc_h = orig.crtc_h;
|
|
|
|
- intel_plane->src_x = orig.src_x;
|
|
|
|
- intel_plane->src_y = orig.src_y;
|
|
|
|
- intel_plane->src_w = orig.src_w;
|
|
|
|
- intel_plane->src_h = orig.src_h;
|
|
|
|
|
|
+ intel_plane->crtc_x = state->orig_dst.x1;
|
|
|
|
+ intel_plane->crtc_y = state->orig_dst.y1;
|
|
|
|
+ intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
|
|
|
|
+ intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
|
|
|
|
+ intel_plane->src_x = state->orig_src.x1;
|
|
|
|
+ intel_plane->src_y = state->orig_src.y1;
|
|
|
|
+ intel_plane->src_w = drm_rect_width(&state->orig_src);
|
|
|
|
+ intel_plane->src_h = drm_rect_height(&state->orig_src);
|
|
intel_plane->obj = obj;
|
|
intel_plane->obj = obj;
|
|
|
|
|
|
return 0;
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+static int
|
|
|
|
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|
|
|
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
|
|
|
|
+ unsigned int crtc_w, unsigned int crtc_h,
|
|
|
|
+ uint32_t src_x, uint32_t src_y,
|
|
|
|
+ uint32_t src_w, uint32_t src_h)
|
|
|
|
+{
|
|
|
|
+ struct intel_plane_state state;
|
|
|
|
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
|
|
|
+ int ret;
|
|
|
|
+
|
|
|
|
+ state.crtc = crtc;
|
|
|
|
+ state.fb = fb;
|
|
|
|
+
|
|
|
|
+ /* sample coordinates in 16.16 fixed point */
|
|
|
|
+ state.src.x1 = src_x;
|
|
|
|
+ state.src.x2 = src_x + src_w;
|
|
|
|
+ state.src.y1 = src_y;
|
|
|
|
+ state.src.y2 = src_y + src_h;
|
|
|
|
+
|
|
|
|
+ /* integer pixels */
|
|
|
|
+ state.dst.x1 = crtc_x;
|
|
|
|
+ state.dst.x2 = crtc_x + crtc_w;
|
|
|
|
+ state.dst.y1 = crtc_y;
|
|
|
|
+ state.dst.y2 = crtc_y + crtc_h;
|
|
|
|
+
|
|
|
|
+ state.clip.x1 = 0;
|
|
|
|
+ state.clip.y1 = 0;
|
|
|
|
+ state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
|
|
|
|
+ state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
|
|
|
|
+
|
|
|
|
+ state.orig_src = state.src;
|
|
|
|
+ state.orig_dst = state.dst;
|
|
|
|
+
|
|
|
|
+ ret = intel_check_primary_plane(plane, &state);
|
|
|
|
+ if (ret)
|
|
|
|
+ return ret;
|
|
|
|
+
|
|
|
|
+ intel_commit_primary_plane(plane, &state);
|
|
|
|
+
|
|
|
|
+ return 0;
|
|
|
|
+}
|
|
|
|
+
|
|
/* Common destruction function for both primary and cursor planes */
|
|
/* Common destruction function for both primary and cursor planes */
|
|
static void intel_plane_destroy(struct drm_plane *plane)
|
|
static void intel_plane_destroy(struct drm_plane *plane)
|
|
{
|
|
{
|
|
@@ -12044,51 +11882,41 @@ intel_cursor_plane_disable(struct drm_plane *plane)
 }
 
 static int
-intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
-			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-			  unsigned int crtc_w, unsigned int crtc_h,
-			  uint32_t src_x, uint32_t src_y,
-			  uint32_t src_w, uint32_t src_h)
+intel_check_cursor_plane(struct drm_plane *plane,
+			 struct intel_plane_state *state)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
-	struct drm_rect dest = {
-		/* integer pixels */
-		.x1 = crtc_x,
-		.y1 = crtc_y,
-		.x2 = crtc_x + crtc_w,
-		.y2 = crtc_y + crtc_h,
-	};
-	struct drm_rect src = {
-		/* 16.16 fixed point */
-		.x1 = src_x,
-		.y1 = src_y,
-		.x2 = src_x + src_w,
-		.y2 = src_y + src_h,
-	};
-	const struct drm_rect clip = {
-		/* integer pixels */
-		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
-		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
-	};
-	bool visible;
-	int ret;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_rect *dest = &state->dst;
+	struct drm_rect *src = &state->src;
+	const struct drm_rect *clip = &state->clip;
 
-	ret = drm_plane_helper_check_update(plane, crtc, fb,
-					    &src, &dest, &clip,
+	return drm_plane_helper_check_update(plane, crtc, fb,
+					     src, dest, clip,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    DRM_PLANE_HELPER_NO_SCALING,
-					    true, true, &visible);
-	if (ret)
-		return ret;
+					     true, true, &state->visible);
+}
 
-	crtc->cursor_x = crtc_x;
-	crtc->cursor_y = crtc_y;
+static int
+intel_commit_cursor_plane(struct drm_plane *plane,
+			  struct intel_plane_state *state)
+{
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_framebuffer *fb = state->fb;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	int crtc_w, crtc_h;
+
+	crtc->cursor_x = state->orig_dst.x1;
+	crtc->cursor_y = state->orig_dst.y1;
 	if (fb != crtc->cursor->fb) {
+		crtc_w = drm_rect_width(&state->orig_dst);
+		crtc_h = drm_rect_height(&state->orig_dst);
 		return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
 	} else {
-		intel_crtc_update_cursor(crtc, visible);
+		intel_crtc_update_cursor(crtc, state->visible);
 
 		intel_frontbuffer_flip(crtc->dev,
 				       INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
@@ -12096,6 +11924,48 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 		return 0;
 	}
 }
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+			  unsigned int crtc_w, unsigned int crtc_h,
+			  uint32_t src_x, uint32_t src_y,
+			  uint32_t src_w, uint32_t src_h)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_plane_state state;
+	int ret;
+
+	state.crtc = crtc;
+	state.fb = fb;
+
+	/* sample coordinates in 16.16 fixed point */
+	state.src.x1 = src_x;
+	state.src.x2 = src_x + src_w;
+	state.src.y1 = src_y;
+	state.src.y2 = src_y + src_h;
+
+	/* integer pixels */
+	state.dst.x1 = crtc_x;
+	state.dst.x2 = crtc_x + crtc_w;
+	state.dst.y1 = crtc_y;
+	state.dst.y2 = crtc_y + crtc_h;
+
+	state.clip.x1 = 0;
+	state.clip.y1 = 0;
+	state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+	state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+	state.orig_src = state.src;
+	state.orig_dst = state.dst;
+
+	ret = intel_check_cursor_plane(plane, &state);
+	if (ret)
+		return ret;
+
+	return intel_commit_cursor_plane(plane, &state);
+}
+
 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
 	.update_plane = intel_cursor_plane_update,
 	.disable_plane = intel_cursor_plane_disable,
@@ -12284,6 +12154,9 @@ static bool intel_crt_present(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	if (INTEL_INFO(dev)->gen >= 9)
+		return false;
+
 	if (IS_ULT(dev))
 		return false;
 
@@ -12636,8 +12509,12 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
 		dev_priv->display.off = ironlake_crtc_off;
-		dev_priv->display.update_primary_plane =
-			ironlake_update_primary_plane;
+		if (INTEL_INFO(dev)->gen >= 9)
+			dev_priv->display.update_primary_plane =
+				skylake_update_primary_plane;
+		else
+			dev_priv->display.update_primary_plane =
+				ironlake_update_primary_plane;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
 		dev_priv->display.get_plane_config = ironlake_get_plane_config;
@@ -12721,6 +12598,10 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.modeset_global_resources =
 			valleyview_modeset_global_resources;
 		dev_priv->display.write_eld = ironlake_write_eld;
+	} else if (INTEL_INFO(dev)->gen >= 9) {
+		dev_priv->display.write_eld = haswell_write_eld;
+		dev_priv->display.modeset_global_resources =
+			haswell_modeset_global_resources;
 	}
 
 	/* Default just returns -ENODEV to indicate unsupported */
@@ -12948,11 +12829,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
 	intel_enable_gt_powersave(dev);
 }
 
-void intel_modeset_suspend_hw(struct drm_device *dev)
-{
-	intel_suspend_hw(dev);
-}
-
 void intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13288,7 +13164,7 @@ void i915_redisable_vga(struct drm_device *dev)
 	 * level, just check if the power well is enabled instead of trying to
 	 * follow the "don't touch the power well if we don't need it" policy
 	 * the rest of the driver uses. */
-	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
 		return;
 
 	i915_redisable_vga_power_on(dev);
@@ -13509,9 +13385,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	 * Too much stuff here (turning of rps, connectors, ...) would
 	 * experience fancy races otherwise.
 	 */
-	drm_irq_uninstall(dev);
-	intel_hpd_cancel_work(dev_priv);
-	dev_priv->pm._irqs_disabled = true;
+	intel_irq_uninstall(dev_priv);
 
 	/*
 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -13666,8 +13540,8 @@ intel_display_capture_error_state(struct drm_device *dev)
 
 	for_each_pipe(dev_priv, i) {
 		error->pipe[i].power_domain_on =
-			intel_display_power_enabled_unlocked(dev_priv,
-							     POWER_DOMAIN_PIPE(i));
+			__intel_display_power_is_enabled(dev_priv,
+							 POWER_DOMAIN_PIPE(i));
 		if (!error->pipe[i].power_domain_on)
 			continue;
 
@@ -13702,7 +13576,7 @@ intel_display_capture_error_state(struct drm_device *dev)
 		enum transcoder cpu_transcoder = transcoders[i];
 
 		error->transcoder[i].power_domain_on =
-			intel_display_power_enabled_unlocked(dev_priv,
+			__intel_display_power_is_enabled(dev_priv,
 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
 		if (!error->transcoder[i].power_domain_on)
 			continue;
@@ -13786,9 +13660,8 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
 
 	for_each_intel_crtc(dev, crtc) {
 		struct intel_unpin_work *work;
-		unsigned long irqflags;
 
-		spin_lock_irqsave(&dev->event_lock, irqflags);
+		spin_lock_irq(&dev->event_lock);
 		work = crtc->unpin_work;
 
@@ -13798,6 +13671,6 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
 			work->event = NULL;
 		}
 
-		spin_unlock_irqrestore(&dev->event_lock, irqflags);
+		spin_unlock_irq(&dev->event_lock);
 	}
 }