/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all CRTCs.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 *
 * Because the number of CTLs can be less than the number of CRTCs,
 * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
 * requested by the client (in mdp5_crtc_mode_set()).
 */

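/*
 * Typical data-path life cycle, as a sketch; the real caller is the CRTC
 * code (e.g. mdp5_crtc_mode_set()), and the local names below are purely
 * illustrative:
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, crtc);
 *	mdp5_ctl_set_intf(ctl, intf);
 *	mdp5_ctl_blend(ctl, lm, blend_cfg);
 *	mdp5_ctl_commit(ctl, flush_mask);
 *	...
 *	mdp5_ctl_release(ctl);
 */
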
struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;

	/* whether this CTL has been allocated or not: */
	bool busy;

	/* memory output connection (@see mdp5_ctl_mode): */
	u32 mode;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* flush mask used to commit CTL registers */
	u32 flush_mask;

	bool cursor_on;

	struct drm_crtc *crtc;
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}

int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
{
	unsigned long flags;
	static const enum mdp5_intfnum intfnum[] = {
			INTF0, INTF1, INTF2, INTF3,
	};

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
			MDP5_CTL_OP_MODE(ctl->mode) |
			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	return 0;
}

int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	int lm;

	lm = mdp5_crtc_get_lm(ctl->crtc);
	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));

	if (enable)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->cursor_on = enable;

	return 0;
}

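/*
 * Illustrative use from the CRTC cursor path (not the exact caller): toggle
 * the CURSOR_OUT bit, then flush with whatever mask the cursor code built:
 *
 *	mdp5_ctl_set_cursor(ctl, true);
 *	mdp5_ctl_commit(ctl, flush_mask);
 */
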
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
	unsigned long flags;

	if (ctl->cursor_on)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	return 0;
}

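/*
 * Illustrative pairing of blend + commit: once the caller has built a
 * blend_cfg word for layer mixer 'lm', it pushes it and then flushes with a
 * mask that includes this CTL's own flush bit:
 *
 *	mdp5_ctl_blend(ctl, lm, blend_cfg);
 *	mdp5_ctl_commit(ctl, flush_mask | mdp5_ctl_get_flush(ctl));
 */
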
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;

	if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
		int lm = mdp5_crtc_get_lm(ctl->crtc);

		if (unlikely(WARN_ON(lm < 0))) {
			dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
					ctl->id, lm);
			return -EINVAL;
		}

		/* for current targets, cursor bit is the same as LM bit */
		flush_mask |= mdp_ctl_flush_mask_lm(lm);
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	return 0;
}

u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
{
	return ctl->flush_mask;
}

void mdp5_ctl_release(struct mdp5_ctl *ctl)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;

	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
				ctl->id, ctl->busy);
		return;
	}

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	ctl->busy = false;
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);

	DBG("CTL %d released", ctl->id);
}

/*
 * mdp5_ctlm_request() - CTL dynamic allocation
 *
 * Note: the current implementation assumes only one CRTC per CTL
 *
 * @return the first free CTL, or NULL if none is available
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		struct drm_crtc *crtc)
{
	struct mdp5_ctl *ctl = NULL;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* find the first unused CTL in the pool: */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if (!ctl_mgr->ctls[c].busy)
			break;

	if (unlikely(c >= ctl_mgr->nctl)) {
		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
		goto unlock;
	}

	ctl = &ctl_mgr->ctls[c];

	ctl->crtc = crtc;
	ctl->busy = true;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}

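/*
 * Sketch of the request/release pairing on the CRTC side (error handling
 * and exact call sites are illustrative):
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, crtc);
 *	if (!ctl)
 *		return -EBUSY;
 *	...
 *	mdp5_ctl_release(ctl);
 */
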
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}

struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			/* drop the pool lock before tearing down the manager */
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->mode = MODE_NONE;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
		ctl->busy = false;
		spin_lock_init(&ctl->hw_lock);
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}

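/*
 * Sketch of the expected init/teardown on the KMS side (caller names are
 * illustrative): mdp5_ctlm_init() is called once with the hardware config,
 * and the returned manager is torn down with mdp5_ctlm_destroy():
 *
 *	ctl_mgr = mdp5_ctlm_init(dev, mmio_base, hw_cfg);
 *	if (IS_ERR(ctl_mgr))
 *		return PTR_ERR(ctl_mgr);
 *	...
 *	mdp5_ctlm_destroy(ctl_mgr);
 */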