@@ -33,97 +33,18 @@ struct tegra_drm_file {
 	struct mutex lock;
 };
 
-static void tegra_atomic_schedule(struct tegra_drm *tegra,
-				  struct drm_atomic_state *state)
-{
-	tegra->commit.state = state;
-	schedule_work(&tegra->commit.work);
-}
-
-static void tegra_atomic_complete(struct tegra_drm *tegra,
-				  struct drm_atomic_state *state)
-{
-	struct drm_device *drm = tegra->drm;
-
-	/*
-	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one condition: It must be guaranteed
-	 * that the asynchronous work has either been cancelled (if the driver
-	 * supports it, which at least requires that the framebuffers get
-	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
-	 * before the new state gets committed on the software side with
-	 * drm_atomic_helper_swap_state().
-	 *
-	 * This scheme allows new atomic state updates to be prepared and
-	 * checked in parallel to the asynchronous completion of the previous
-	 * update. Which is important since compositors need to figure out the
-	 * composition of the next frame right after having submitted the
-	 * current layout.
-	 */
-
-	drm_atomic_helper_commit_modeset_disables(drm, state);
-	drm_atomic_helper_commit_modeset_enables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state,
-					DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
-	drm_atomic_helper_wait_for_vblanks(drm, state);
-
-	drm_atomic_helper_cleanup_planes(drm, state);
-	drm_atomic_state_put(state);
-}
-
-static void tegra_atomic_work(struct work_struct *work)
-{
-	struct tegra_drm *tegra = container_of(work, struct tegra_drm,
-					       commit.work);
-
-	tegra_atomic_complete(tegra, tegra->commit.state);
-}
-
-static int tegra_atomic_commit(struct drm_device *drm,
-			       struct drm_atomic_state *state, bool nonblock)
-{
-	struct tegra_drm *tegra = drm->dev_private;
-	int err;
-
-	err = drm_atomic_helper_prepare_planes(drm, state);
-	if (err)
-		return err;
-
-	/* serialize outstanding nonblocking commits */
-	mutex_lock(&tegra->commit.lock);
-	flush_work(&tegra->commit.work);
-
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 */
-
-	err = drm_atomic_helper_swap_state(state, true);
-	if (err) {
-		mutex_unlock(&tegra->commit.lock);
-		drm_atomic_helper_cleanup_planes(drm, state);
-		return err;
-	}
-
-	drm_atomic_state_get(state);
-	if (nonblock)
-		tegra_atomic_schedule(tegra, state);
-	else
-		tegra_atomic_complete(tegra, state);
-
-	mutex_unlock(&tegra->commit.lock);
-	return 0;
-}
-
-static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
 	.fb_create = tegra_fb_create,
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 	.output_poll_changed = tegra_fb_output_poll_changed,
 #endif
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = tegra_atomic_commit,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs
+tegra_drm_mode_config_helpers = {
+	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 };
 
 static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
@@ -172,9 +93,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 	mutex_init(&tegra->clients_lock);
 	INIT_LIST_HEAD(&tegra->clients);
 
-	mutex_init(&tegra->commit.lock);
-	INIT_WORK(&tegra->commit.work, tegra_atomic_work);
-
 	drm->dev_private = tegra;
 	tegra->drm = drm;
 
@@ -188,7 +106,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
 	drm->mode_config.allow_fb_modifiers = true;
 
-	drm->mode_config.funcs = &tegra_drm_mode_funcs;
+	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
+	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
 
 	err = tegra_drm_fb_prepare(drm);
 	if (err < 0)
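
For reference, a minimal sketch of what the driver now relies on: with
.atomic_commit set to drm_atomic_helper_commit(), the helper core takes over
plane preparation, the software state swap and the (non)blocking scheduling
that tegra_atomic_commit() used to open-code, and then calls the driver's
.atomic_commit_tail. The sequence below is an illustrative approximation of
what drm_atomic_helper_commit_tail_rpm() does (not a verbatim copy of the
upstream helper, and example_commit_tail_rpm is a hypothetical name); the
point of the _rpm variant is that planes are only committed on CRTCs that are
still active after the modeset, which keeps runtime-PM-managed hardware
powered when it is touched:

	/* illustrative stand-in for drm_atomic_helper_commit_tail_rpm() */
	static void example_commit_tail_rpm(struct drm_atomic_state *old_state)
	{
		struct drm_device *dev = old_state->dev;

		/* shut down outputs that are going away or being reconfigured */
		drm_atomic_helper_commit_modeset_disables(dev, old_state);

		/* enable CRTCs and encoders for the new state */
		drm_atomic_helper_commit_modeset_enables(dev, old_state);

		/* only touch planes on CRTCs that remain active */
		drm_atomic_helper_commit_planes(dev, old_state,
						DRM_PLANE_COMMIT_ACTIVE_ONLY);

		/* signal hardware completion, then wait for the flips to land */
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(dev, old_state);

		drm_atomic_helper_cleanup_planes(dev, old_state);
	}

This mirrors the disable/enable/plane-commit/vblank-wait/cleanup sequence the
removed tegra_atomic_complete() performed, minus the hand-rolled workqueue and
commit lock, which the generic helpers now provide.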