@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,18 +35,16 @@
 
 struct op_mode {
 	struct mdp5_interface intf;
-	/*
-	 * TODO: add a state variable to control the pipeline
-	 *
-	 * eg: WB interface needs both buffer addresses to be committed +
-	 * output buffers ready to be written into, before we can START.
-	 */
+
+	bool encoder_enabled;
+	uint32_t start_mask;
 };
 
 struct mdp5_ctl {
 	struct mdp5_ctl_manager *ctlm;
 
 	u32 id;
+	int lm;
 
 	/* whether this CTL has been allocated or not: */
 	bool busy;
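The two new op_mode fields resolve the old TODO above: start_mask is a checklist of flush bits that must all be flushed before the pipeline can be STARTed, and encoder_enabled gates the kickoff. A minimal standalone model of that bookkeeping, with invented names and bit values (nothing here is the driver's actual API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_FLUSH_LM0 (1u << 0)	/* stand-in layer-mixer flush bit */
#define FAKE_FLUSH_WB  (1u << 1)	/* stand-in writeback flush bit */

struct op_mode_model {
	bool encoder_enabled;
	uint32_t start_mask;	/* bits still to be flushed before START */
};

static bool can_start(const struct op_mode_model *m)
{
	/* START only once the encoder is up and the checklist is empty */
	return m->encoder_enabled && m->start_mask == 0;
}

int main(void)
{
	struct op_mode_model m = {
		.encoder_enabled = true,
		.start_mask = FAKE_FLUSH_LM0 | FAKE_FLUSH_WB,
	};

	m.start_mask &= ~FAKE_FLUSH_LM0;	/* LM registers flushed */
	printf("after LM flush: can_start=%d\n", can_start(&m));	/* 0 */

	m.start_mask &= ~FAKE_FLUSH_WB;		/* WB addresses flushed */
	printf("after WB flush: can_start=%d\n", can_start(&m));	/* 1 */
	return 0;
}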
@@ -58,8 +56,8 @@ struct mdp5_ctl {
 	spinlock_t hw_lock;
 	u32 reg_offset;
 
-	/* flush mask used to commit CTL registers */
-	u32 flush_mask;
+	/* when do CTL registers need to be flushed? (mask of trigger bits) */
+	u32 pending_ctl_trigger;
 
 	bool cursor_on;
 
@@ -73,6 +71,9 @@ struct mdp5_ctl_manager {
 	u32 nlm;
 	u32 nctl;
 
+	/* to filter out non-present bits in the current hardware config */
+	u32 flush_hw_mask;
+
 	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
 	spinlock_t pool_lock;
 	struct mdp5_ctl ctls[MAX_CTL];
@@ -174,6 +175,9 @@ int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
 
 	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
 
+	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
+				   mdp_ctl_flush_mask_encoder(intf);
+
 	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
 	if (!mdp5_cfg_intf_is_virtual(intf->type))
 		set_display_intf(mdp5_kms, intf);
@@ -183,14 +187,90 @@ int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
 	return 0;
 }
 
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
+static bool start_signal_needed(struct mdp5_ctl *ctl)
+{
+	struct op_mode *pipeline = &ctl->pipeline;
+
+	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
+		return false;
+
+	switch (pipeline->intf.type) {
+	case INTF_WB:
+		return true;
+	case INTF_DSI:
+		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
+	default:
+		return false;
+	}
+}
+
+/*
+ * send_start_signal() - Overlay Processor Start Signal
+ *
+ * For a given control operation (display pipeline), a START signal needs to be
+ * executed in order to kick off operation and activate all layers.
+ * e.g.: DSI command mode, Writeback
+ */
+static void send_start_signal(struct mdp5_ctl *ctl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctl->hw_lock, flags);
+	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
+	spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+static void refill_start_mask(struct mdp5_ctl *ctl)
+{
+	struct op_mode *pipeline = &ctl->pipeline;
+	struct mdp5_interface *intf = &ctl->pipeline.intf;
+
+	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
+
+	/*
+	 * Writeback encoder needs to program & flush
+	 * address registers for each page flip..
+	 */
+	if (intf->type == INTF_WB)
+		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
+}
+
+/**
+ * mdp5_ctl_set_encoder_state() - set the encoder state
+ *
+ * @enable: true, when encoder is ready for data streaming; false, otherwise.
+ *
+ * Note:
+ * This encoder state is needed to trigger START signal (data path kickoff).
+ */
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
+{
+	if (WARN_ON(!ctl))
+		return -EINVAL;
+
+	ctl->pipeline.encoder_enabled = enabled;
+	DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");
+
+	if (start_signal_needed(ctl)) {
+		send_start_signal(ctl);
+		refill_start_mask(ctl);
+	}
+
+	return 0;
+}
+
+/*
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
+ */
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	unsigned long flags;
 	u32 blend_cfg;
-	int lm;
+	int lm = ctl->lm;
 
-	lm = mdp5_crtc_get_lm(ctl->crtc);
 	if (unlikely(WARN_ON(lm < 0))) {
 		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
 				ctl->id, lm);
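The predicate above is the heart of the new flow: START is only needed for pipelines that do not free-run (writeback, DSI command mode), and only once every bit in start_mask has been flushed. A hedged standalone restatement, using local stand-in enums rather than the driver's types:

#include <stdbool.h>
#include <stdio.h>

enum intf_model { MODEL_INTF_VIDEO, MODEL_INTF_WB, MODEL_INTF_DSI };
enum dsi_mode_model { MODEL_DSI_VIDEO, MODEL_DSI_COMMAND };

static bool needs_start(enum intf_model type, enum dsi_mode_model mode,
			bool encoder_enabled, unsigned int start_mask)
{
	if (!encoder_enabled || start_mask != 0)
		return false;	/* pipeline not fully committed yet */

	switch (type) {
	case MODEL_INTF_WB:
		return true;	/* writeback only runs when kicked */
	case MODEL_INTF_DSI:
		/* command mode is kicked per frame; video mode free-runs */
		return mode == MODEL_DSI_COMMAND;
	default:
		return false;	/* video-timing interfaces need no START */
	}
}

int main(void)
{
	printf("%d\n", needs_start(MODEL_INTF_WB, MODEL_DSI_VIDEO, true, 0));	/* 1 */
	printf("%d\n", needs_start(MODEL_INTF_DSI, MODEL_DSI_COMMAND, true, 1));	/* 0: flush pending */
	return 0;
}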
@@ -210,12 +290,12 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
 
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
 	ctl->cursor_on = enable;
 
 	return 0;
 }
 
-
 int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
 {
 	unsigned long flags;
@@ -229,37 +309,133 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
 	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+
 	return 0;
 }
 
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
+{
+	/* these are dummy bits for now, but will appear in next chipsets: */
+#define MDP5_CTL_FLUSH_TIMING_0   0x80000000
+#define MDP5_CTL_FLUSH_TIMING_1   0x40000000
+#define MDP5_CTL_FLUSH_TIMING_2   0x20000000
+#define MDP5_CTL_FLUSH_TIMING_3   0x10000000
+#define MDP5_CTL_FLUSH_WB         0x00010000
+
+	if (intf->type == INTF_WB)
+		return MDP5_CTL_FLUSH_WB;
+
+	switch (intf->num) {
+	case 0: return MDP5_CTL_FLUSH_TIMING_0;
+	case 1: return MDP5_CTL_FLUSH_TIMING_1;
+	case 2: return MDP5_CTL_FLUSH_TIMING_2;
+	case 3: return MDP5_CTL_FLUSH_TIMING_3;
+	default: return 0;
+	}
+}
+
+u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+	/* these are dummy bits for now, but will appear in next chipsets: */
+#define MDP5_CTL_FLUSH_CURSOR_0   0x00400000
+#define MDP5_CTL_FLUSH_CURSOR_1   0x00800000
+
+	switch (cursor_id) {
+	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
+	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
+	default: return 0;
+	}
+}
+
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+	default: return 0;
+	}
+}
+
+u32 mdp_ctl_flush_mask_lm(int lm)
+{
+	switch (lm) {
+	case 0: return MDP5_CTL_FLUSH_LM0;
+	case 1: return MDP5_CTL_FLUSH_LM1;
+	case 2: return MDP5_CTL_FLUSH_LM2;
+	case 5: return MDP5_CTL_FLUSH_LM5;
+	default: return 0;
+	}
+}
+
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+	u32 sw_mask = 0;
+#define BIT_NEEDS_SW_FIX(bit) \
+	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
+
+	/* for some targets, cursor bit is the same as LM bit */
+	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
+		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
+
+	return sw_mask;
+}
+
+/**
+ * mdp5_ctl_commit() - Register Flush
+ *
+ * The flush register is used to indicate several registers are all
+ * programmed, and are safe to update to the back copy of the double
+ * buffered registers.
+ *
+ * Some registers FLUSH bits are shared when the hardware does not have
+ * dedicated bits for them; handling these is the job of fix_sw_flush().
+ *
+ * CTL registers need to be flushed in some circumstances; if that is the
+ * case, some trigger bits will be present in both flush mask and
+ * ctl->pending_ctl_trigger.
+ */
 int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+	struct op_mode *pipeline = &ctl->pipeline;
 	unsigned long flags;
 
-	if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
-		int lm = mdp5_crtc_get_lm(ctl->crtc);
+	pipeline->start_mask &= ~flush_mask;
 
-		if (unlikely(WARN_ON(lm < 0))) {
-			dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
-					ctl->id, lm);
-			return -EINVAL;
-		}
+	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
+			pipeline->start_mask, ctl->pending_ctl_trigger);
 
-		/* for current targets, cursor bit is the same as LM bit */
-		flush_mask |= mdp_ctl_flush_mask_lm(lm);
+	if (ctl->pending_ctl_trigger & flush_mask) {
+		flush_mask |= MDP5_CTL_FLUSH_CTL;
+		ctl->pending_ctl_trigger = 0;
 	}
 
-	spin_lock_irqsave(&ctl->hw_lock, flags);
-	ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
-	spin_unlock_irqrestore(&ctl->hw_lock, flags);
+	flush_mask |= fix_sw_flush(ctl, flush_mask);
 
-	return 0;
-}
+	flush_mask &= ctl_mgr->flush_hw_mask;
 
-u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
-{
-	return ctl->flush_mask;
+	if (flush_mask) {
+		spin_lock_irqsave(&ctl->hw_lock, flags);
+		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+		spin_unlock_irqrestore(&ctl->hw_lock, flags);
+	}
+
+	if (start_signal_needed(ctl)) {
+		send_start_signal(ctl);
+		refill_start_mask(ctl);
+	}
+
+	return 0;
 }
 
 void mdp5_ctl_release(struct mdp5_ctl *ctl)
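Before touching the FLUSH register, the new commit path applies three mask transformations in order: fold a pending CTL trigger into the flush, substitute LM bits for flush bits the target lacks (fix_sw_flush), and clamp to flush_hw_mask. A compact standalone model of that plumbing, with invented bit values (the real ones come from the register headers):

#include <stdint.h>
#include <stdio.h>

#define M_FLUSH_LM0      (1u << 0)
#define M_FLUSH_CTL      (1u << 1)	/* the CTL registers themselves */
#define M_FLUSH_CURSOR_0 (1u << 2)	/* absent on some targets */

static uint32_t commit_mask(uint32_t flush_mask, uint32_t *pending_trigger,
			    uint32_t hw_mask, uint32_t cursor_lm_alias)
{
	/* 1) if this flush covers a pending trigger, flush CTL regs too */
	if (*pending_trigger & flush_mask) {
		flush_mask |= M_FLUSH_CTL;
		*pending_trigger = 0;
	}

	/* 2) sw fix: no dedicated cursor bit -> reuse the LM flush bit */
	if (!(hw_mask & M_FLUSH_CURSOR_0) && (flush_mask & M_FLUSH_CURSOR_0))
		flush_mask |= cursor_lm_alias;

	/* 3) never write bits the hardware does not implement */
	return flush_mask & hw_mask;
}

int main(void)
{
	uint32_t pending = M_FLUSH_LM0;			/* blend() ran */
	uint32_t hw = M_FLUSH_LM0 | M_FLUSH_CTL;	/* no cursor bit */

	uint32_t reg = commit_mask(M_FLUSH_LM0 | M_FLUSH_CURSOR_0,
				   &pending, hw, M_FLUSH_LM0);
	printf("FLUSH write: 0x%x\n", reg);		/* 0x3 = LM0|CTL */
	return 0;
}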
@@ -280,6 +456,11 @@ void mdp5_ctl_release(struct mdp5_ctl *ctl)
 	DBG("CTL %d released", ctl->id);
 }
 
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
+{
+	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+}
+
 /*
  * mdp5_ctl_request() - CTL dynamic allocation
  *
@@ -307,8 +488,10 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 
 	ctl = &ctl_mgr->ctls[c];
 
+	ctl->lm = mdp5_crtc_get_lm(crtc);
 	ctl->crtc = crtc;
 	ctl->busy = true;
+	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
 
 unlock:
@@ -339,7 +522,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
 {
 	struct mdp5_ctl_manager *ctl_mgr;
-	const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
+	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
 	unsigned long flags;
 	int c, ret;
 
@@ -361,6 +544,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 	ctl_mgr->dev = dev;
 	ctl_mgr->nlm = hw_cfg->lm.count;
 	ctl_mgr->nctl = ctl_cfg->count;
+	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
 	spin_lock_init(&ctl_mgr->pool_lock);
 
 	/* initialize each CTL of the pool: */
@@ -376,7 +560,6 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 		ctl->ctlm = ctl_mgr;
 		ctl->id = c;
 		ctl->reg_offset = ctl_cfg->base[c];
-		ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
 		ctl->busy = false;
 		spin_lock_init(&ctl->hw_lock);
 	}
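Taken together, the lifecycle this patch sets up is: mdp5_ctlm_request() caches the CRTC's layer mixer in ctl->lm; mdp5_ctl_set_intf() seeds pipeline.start_mask with the LM and encoder flush bits; mdp5_ctl_set_encoder_state(true) marks the encoder ready; mdp5_ctl_blend() and mdp5_ctl_set_cursor() record their trigger bits in pending_ctl_trigger; and mdp5_ctl_commit() folds those triggers into the flush, clears the flushed bits from start_mask, clamps to flush_hw_mask, and sends START once the mask drains to zero, re-arming it via refill_start_mask() for the next frame.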