@@ -60,8 +60,6 @@ struct mdp5_ctl {
 	u32 pending_ctl_trigger;
 
 	bool cursor_on;
-
-	struct drm_crtc *crtc;
 };
 
 struct mdp5_ctl_manager {
@@ -168,11 +166,21 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }
 
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
+		struct mdp5_interface *intf, int lm)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
 
+	if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
+		dev_err(mdp5_kms->dev->dev,
+			"CTL %d is allocated by INTF %d, but used by INTF %d\n",
+			ctl->id, ctl->pipeline.intf.num, intf->num);
+		return -EINVAL;
+	}
+
+	ctl->lm = lm;
+
 	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
 
 	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
@@ -335,7 +343,7 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
 	}
 }
 
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u8 *stage, u32 stage_cnt,
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
 		u32 ctl_blend_op_flags)
 {
 	unsigned long flags;
@@ -358,13 +366,13 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u8 *stage, u32 stage_cnt,
 	if (ctl->cursor_on)
 		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
 
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, lm), blend_ext_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
-	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
 
-	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", lm,
+	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
 		blend_cfg, blend_ext_cfg);
 
 	return 0;
@@ -490,38 +498,18 @@ u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
 	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
 }
 
-void mdp5_ctl_release(struct mdp5_ctl *ctl)
-{
-	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
-	unsigned long flags;
-
-	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
-		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
-			ctl->id, ctl->busy);
-		return;
-	}
-
-	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
-	ctl->busy = false;
-	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
-
-	DBG("CTL %d released", ctl->id);
-}
-
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
 {
 	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
 }
 
 /*
- * mdp5_ctl_request() - CTL dynamic allocation
- *
- * Note: Current implementation considers that we can only have one CRTC per CTL
+ * mdp5_ctl_request() - CTL allocation
  *
  * @return first free CTL
  */
 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
-		struct drm_crtc *crtc)
+		int intf_num)
 {
 	struct mdp5_ctl *ctl = NULL;
 	unsigned long flags;
@@ -539,9 +527,8 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 	}
 
 	ctl = &ctl_mgr->ctls[c];
-
-	ctl->lm = mdp5_crtc_get_lm(crtc);
-	ctl->crtc = crtc;
+	ctl->pipeline.intf.num = intf_num;
+	ctl->lm = -1;
 	ctl->busy = true;
 	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
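
For context, a minimal sketch of how a caller might drive the reworked API after this change: the CTL is requested up front with the interface number, the layer mixer is attached later through mdp5_ctl_set_pipeline(), and mdp5_ctl_blend() then relies on the lm stored in the CTL. The wrapper function, its arguments, and the failure convention below are illustrative assumptions (the real callers live in the encoder/CRTC paths of the driver and are not shown in this hunk); only the three mdp5_ctl* signatures come from the patch itself.

/*
 * Illustrative caller of the reworked CTL API (not from the patch):
 * request the CTL per interface, bind the layer mixer, then blend.
 * Assumes the driver's usual mdp5 headers are in scope.
 */
static int example_setup_ctl(struct mdp5_ctl_manager *ctl_mgr,
			     struct mdp5_interface *intf, int lm,
			     u8 *stage, u32 stage_cnt, u32 blend_flags)
{
	struct mdp5_ctl *ctl;
	int ret;

	/* CTLs are now reserved per display interface, not per CRTC */
	ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
	if (!ctl)
		return -ENODEV;	/* assumed failure convention */

	/* attach the interface and layer mixer before programming blending */
	ret = mdp5_ctl_set_pipeline(ctl, intf, lm);
	if (ret)
		return ret;

	/* mdp5_ctl_blend() no longer takes an lm argument; it uses ctl->lm */
	return mdp5_ctl_blend(ctl, stage, stage_cnt, blend_flags);
}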