mdp5_smp.c

/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "mdp5_kms.h"
#include "mdp5_smp.h"

/* SMP - Shared Memory Pool
 *
 * These are shared between all the clients, where each plane in a
 * scanout buffer is an SMP client.  Ie. scanout of a 3-plane I420
 * buffer on pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
 *
 * Based on the size of the attached scanout buffer, a certain # of
 * blocks must be allocated to that client out of the shared pool.
 *
 * In some hw, some blocks are statically allocated for certain pipes
 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
 *
 * For each block that can be dynamically allocated, it can be either
 * free, or pending/in-use by a client.  The updates happen in three steps:
 *
 *  1) mdp5_smp_request():
 *     When plane scanout is setup, calculate required number of
 *     blocks needed per client, and request.  Blocks neither in-use
 *     nor pending for any other client are added to the client's
 *     pending set.
 *
 *  2) mdp5_smp_configure():
 *     As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
 *     are configured for the union(pending, inuse).
 *
 *  3) mdp5_smp_commit():
 *     After next vblank, copy pending -> inuse.  Optionally update
 *     MDP5_SMP_ALLOC registers if there are newly unused blocks.
 *
 * On the next vblank after changes have been committed to hw, the
 * client's pending blocks become its in-use blocks (and no-longer-
 * in-use blocks become available to other clients).
 *
 * btw, hurray for confusing overloaded acronyms!  :-/
 *
 * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
 * should happen at (or before?) atomic->check().  And we'd need
 * an API to discard previous requests if the update is aborted or
 * test-only.
 *
 * TODO: would perhaps be nice to have debugfs to dump out kernel
 * inuse and pending state of all clients..
 */
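
/*
 * Illustrative walkthrough (example values, not code from this driver):
 * growing a client from 2 blocks to 3 under the scheme above.  Assume
 * blocks {0,1} are inuse+pending for the client and block 2 is free:
 *
 *	mdp5_smp_request():   pending = {0,1,2}; block 2 is marked taken
 *	                      in the global state
 *	mdp5_smp_configure(): ALLOC registers programmed for
 *	                      union(pending, inuse) = {0,1,2}
 *	vblank, then
 *	mdp5_smp_commit():    inuse = pending = {0,1,2}
 *
 * Shrinking back to 2 blocks only drops a block from pending in step
 * #1; its global state bit is cleared in _commit(), once the hw is no
 * longer fetching through it.
 */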
struct mdp5_smp {
	struct drm_device *dev;

	int blk_cnt;
	int blk_size;

	spinlock_t state_lock;
	mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */

	struct mdp5_client_smp_state client_state[CID_MAX];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
	struct msm_drm_private *priv = smp->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
{
	WARN_ON(plane >= pipe2nclients(pipe));
	switch (pipe) {
	case SSPP_VIG0: return CID_VIG0_Y + plane;
	case SSPP_VIG1: return CID_VIG1_Y + plane;
	case SSPP_VIG2: return CID_VIG2_Y + plane;
	case SSPP_RGB0: return CID_RGB0;
	case SSPP_RGB1: return CID_RGB1;
	case SSPP_RGB2: return CID_RGB2;
	case SSPP_DMA0: return CID_DMA0_Y + plane;
	case SSPP_DMA1: return CID_DMA1_Y + plane;
	case SSPP_VIG3: return CID_VIG3_Y + plane;
	case SSPP_RGB3: return CID_RGB3;
	default:        return CID_UNUSED;
	}
}
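
/*
 * Example: pipe2client(SSPP_VIG0, 0) is CID_VIG0_Y, and plane indices
 * 1 and 2 select the two chroma clients that follow CID_VIG0_Y in the
 * enum (the "+ plane" arithmetic assumes those ids are contiguous).
 * Any plane index on an RGB pipe maps to that pipe's single client.
 */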
/* step #1: update # of blocks pending for the client: */
static int smp_request_block(struct mdp5_smp *smp,
		enum mdp5_client_id cid, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
	int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
	int reserved;
	unsigned long flags;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	reserved = hw_cfg->smp.reserved[cid];

	spin_lock_irqsave(&smp->state_lock, flags);

	if (reserved) {
		/* don't let statically reserved blocks go negative: */
		nblks = max(0, nblks - reserved);
		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
	}

	avail = cnt - bitmap_weight(smp->state, cnt);
	if (nblks > avail) {
		dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
				nblks, avail);
		ret = -ENOSPC;
		goto fail;
	}

	cur_nblks = bitmap_weight(ps->pending, cnt);
	if (nblks > cur_nblks) {
		/* grow the existing pending reservation: */
		for (i = cur_nblks; i < nblks; i++) {
			int blk = find_first_zero_bit(smp->state, cnt);
			set_bit(blk, ps->pending);
			set_bit(blk, smp->state);
		}
	} else {
		/* shrink the existing pending reservation: */
		for (i = cur_nblks; i > nblks; i--) {
			int blk = find_first_bit(ps->pending, cnt);
			clear_bit(blk, ps->pending);
			/* don't clear in global smp_state until _commit() */
		}
	}

	ret = 0;
fail:
	spin_unlock_irqrestore(&smp->state_lock, flags);
	return ret;
}
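
/*
 * Worked example (illustrative values): with cnt = 22 blocks, three
 * already taken globally, and a client holding pending = {5,6}, a
 * request for nblks = 4 grows the reservation by two
 * find_first_zero_bit() picks, while a later request for nblks = 1
 * clears one pending bit but leaves the matching global state bit set
 * until mdp5_smp_commit() runs.
 */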
static void set_fifo_thresholds(struct mdp5_smp *smp,
		enum mdp5_pipe pipe, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
	u32 val;

	/* 1/4 of SMP pool that is being fetched */
	val = (nblks * smp_entries_per_blk) / 4;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
}
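
/*
 * Worked example (blk_size assumed, not from any particular SoC): SMP
 * entries are 128 bits wide, so with blk_size = 4096 bytes each block
 * holds 4096 / 16 = 256 entries.  For nblks = 8, val = (8 * 256) / 4 =
 * 512, so the watermarks are programmed as WM_0 = 512, WM_1 = 1024 and
 * WM_2 = 1536, i.e. at 1/4, 2/4 and 3/4 of the allocated entries.
 */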
/*
 * NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines, nblks, ret;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	for (i = 0, nblks = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);

		/* for hw rev v1.00 */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, pipe2client(pipe, i), n);
		if (ret) {
			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		nblks += n;
	}

	set_fifo_thresholds(smp, pipe, nblks);

	return 0;
}
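
/*
 * Worked example (illustrative): a 1920-wide I420 scanout with
 * blk_size = 4096.  I420 has nplanes = 3, hsub = 2 and cpp = 1 for
 * every plane, so the luma plane fetches 1920 bytes per line and each
 * chroma plane 960.  With nlines = 2 that is DIV_ROUND_UP(3840, 4096)
 * = 1 block for Y and 1 each for CB/CR, i.e. nblks = 3 (unchanged by
 * the power-of-two round-up on hw rev v1.00).
 */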
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++)
		smp_request_block(smp, pipe2client(pipe, i), 0);
	set_fifo_thresholds(smp, pipe, 0);
}
static void update_smp_state(struct mdp5_smp *smp,
		enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int cnt = smp->blk_cnt;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));

		switch (fld) {
		case 0:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
	}
}
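
/*
 * Example of the register packing above: each SMP_ALLOC register holds
 * three client-id fields, one per MMB.  Assigning block 7 therefore
 * lands in register idx = 7 / 3 = 2, field fld = 7 % 3 = 1, so only the
 * CLIENT1 bits of ALLOC_W_REG(2) (and its _R_ twin) are rewritten.
 */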
/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t assigned;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		enum mdp5_client_id cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		bitmap_or(assigned, ps->inuse, ps->pending, cnt);
		update_smp_state(smp, cid, &assigned);
	}
}
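
/*
 * Why the union (example sets, illustrative only): between FLUSH and
 * the following vblank the hw may still be fetching through the old
 * (inuse) blocks while the new (pending) set takes effect, so with
 * inuse = {0,1} and pending = {1,2} both must stay assigned, i.e.
 * assigned = {0,1,2}, until _commit() releases block 0.
 */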
/* step #3: after vblank, copy pending -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t released;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		enum mdp5_client_id cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * Figure out if there are any blocks we were previously
		 * using, which can be released and made available to
		 * other clients:
		 */
		if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
			unsigned long flags;

			spin_lock_irqsave(&smp->state_lock, flags);
			/* clear released blocks: */
			bitmap_andnot(smp->state, smp->state, released, cnt);
			spin_unlock_irqrestore(&smp->state_lock, flags);

			update_smp_state(smp, CID_UNUSED, &released);
		}

		bitmap_copy(ps->inuse, ps->pending, cnt);
	}
}
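
/*
 * Example of the release math (illustrative bit patterns): with
 * inuse = 0b0111 and pending = 0b0011, bitmap_andnot() yields
 * released = 0b0100; that bit is cleared from the global state and the
 * hw slot is handed back by reprogramming it to CID_UNUSED.
 */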
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}

struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
{
	struct mdp5_smp *smp = NULL;
	int ret;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (unlikely(!smp)) {
		ret = -ENOMEM;
		goto fail;
	}

	smp->dev = dev;
	smp->blk_cnt = cfg->mmb_count;
	smp->blk_size = cfg->mmb_size;

	/* statically tied MMBs cannot be re-allocated: */
	bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
	spin_lock_init(&smp->state_lock);

	return smp;
fail:
	if (smp)
		mdp5_smp_destroy(smp);
	return ERR_PTR(ret);
}
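
/*
 * Usage sketch (hypothetical caller code, not part of this file):
 *
 *	struct mdp5_smp *smp = mdp5_smp_init(dev, &hw_cfg->smp);
 *
 *	if (IS_ERR(smp))
 *		return PTR_ERR(smp);
 *	...
 *	mdp5_smp_destroy(smp);
 *
 * Note the ERR_PTR()/IS_ERR() convention: init never returns NULL, so
 * callers must check with IS_ERR() rather than a NULL test.
 */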