  1. /*
  2. * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  3. * Copyright (C) 2013 Red Hat
  4. * Author: Rob Clark <robdclark@gmail.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #include "mdp5_kms.h"
  19. #include "mdp5_smp.h"
/*
 * SMP (Shared Memory Pool) allocator state. The pool is divided into
 * fixed-size blocks (MMBs) handed out to pipe fetch clients.
 */
struct mdp5_smp {
	struct drm_device *dev;

	uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */

	int blk_cnt;	/* total number of MMBs in the pool (cfg->mmb_count) */
	int blk_size;	/* size of one MMB in bytes (cfg->mmb_size) */
};
  26. static inline
  27. struct mdp5_kms *get_kms(struct mdp5_smp *smp)
  28. {
  29. struct msm_drm_private *priv = smp->dev->dev_private;
  30. return to_mdp5_kms(to_mdp_kms(priv->kms));
  31. }
  32. static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
  33. {
  34. #define CID_UNUSED 0
  35. if (WARN_ON(plane >= pipe2nclients(pipe)))
  36. return CID_UNUSED;
  37. /*
  38. * Note on SMP clients:
  39. * For ViG pipes, fetch Y/Cr/Cb-components clients are always
  40. * consecutive, and in that order.
  41. *
  42. * e.g.:
  43. * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
  44. * Y plane's client ID is N
  45. * Cr plane's client ID is N + 1
  46. * Cb plane's client ID is N + 2
  47. */
  48. return mdp5_cfg->smp.clients[pipe] + plane;
  49. }
  50. /* allocate blocks for the specified request: */
  51. static int smp_request_block(struct mdp5_smp *smp,
  52. struct mdp5_smp_state *state,
  53. u32 cid, int nblks)
  54. {
  55. void *cs = state->client_state[cid];
  56. int i, avail, cnt = smp->blk_cnt;
  57. uint8_t reserved;
  58. /* we shouldn't be requesting blocks for an in-use client: */
  59. WARN_ON(bitmap_weight(cs, cnt) > 0);
  60. reserved = smp->reserved[cid];
  61. if (reserved) {
  62. nblks = max(0, nblks - reserved);
  63. DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
  64. }
  65. avail = cnt - bitmap_weight(state->state, cnt);
  66. if (nblks > avail) {
  67. dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
  68. nblks, avail);
  69. return -ENOSPC;
  70. }
  71. for (i = 0; i < nblks; i++) {
  72. int blk = find_first_zero_bit(state->state, cnt);
  73. set_bit(blk, cs);
  74. set_bit(blk, state->state);
  75. }
  76. return 0;
  77. }
  78. static void set_fifo_thresholds(struct mdp5_smp *smp,
  79. enum mdp5_pipe pipe, int nblks)
  80. {
  81. struct mdp5_kms *mdp5_kms = get_kms(smp);
  82. u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
  83. u32 val;
  84. /* 1/4 of SMP pool that is being fetched */
  85. val = (nblks * smp_entries_per_blk) / 4;
  86. mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
  87. mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
  88. mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
  89. }
  90. /*
  91. * NOTE: looks like if horizontal decimation is used (if we supported that)
  92. * then the width used to calculate SMP block requirements is the post-
  93. * decimated width. Ie. SMP buffering sits downstream of decimation (which
  94. * presumably happens during the dma from scanout buffer).
  95. */
  96. uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
  97. const struct mdp_format *format,
  98. u32 width, bool hdecim)
  99. {
  100. struct mdp5_kms *mdp5_kms = get_kms(smp);
  101. int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
  102. int i, hsub, nplanes, nlines;
  103. u32 fmt = format->base.pixel_format;
  104. uint32_t blkcfg = 0;
  105. nplanes = drm_format_num_planes(fmt);
  106. hsub = drm_format_horz_chroma_subsampling(fmt);
  107. /* different if BWC (compressed framebuffer?) enabled: */
  108. nlines = 2;
  109. /* Newer MDPs have split/packing logic, which fetches sub-sampled
  110. * U and V components (splits them from Y if necessary) and packs
  111. * them together, writes to SMP using a single client.
  112. */
  113. if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
  114. fmt = DRM_FORMAT_NV24;
  115. nplanes = 2;
  116. /* if decimation is enabled, HW decimates less on the
  117. * sub sampled chroma components
  118. */
  119. if (hdecim && (hsub > 1))
  120. hsub = 1;
  121. }
  122. for (i = 0; i < nplanes; i++) {
  123. int n, fetch_stride, cpp;
  124. cpp = drm_format_plane_cpp(fmt, i);
  125. fetch_stride = width * cpp / (i ? hsub : 1);
  126. n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
  127. /* for hw rev v1.00 */
  128. if (rev == 0)
  129. n = roundup_pow_of_two(n);
  130. blkcfg |= (n << (8 * i));
  131. }
  132. return blkcfg;
  133. }
  134. int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
  135. enum mdp5_pipe pipe, uint32_t blkcfg)
  136. {
  137. struct mdp5_kms *mdp5_kms = get_kms(smp);
  138. struct drm_device *dev = mdp5_kms->dev;
  139. int i, ret;
  140. for (i = 0; i < pipe2nclients(pipe); i++) {
  141. u32 cid = pipe2client(pipe, i);
  142. int n = blkcfg & 0xff;
  143. if (!n)
  144. continue;
  145. DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
  146. ret = smp_request_block(smp, state, cid, n);
  147. if (ret) {
  148. dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
  149. n, ret);
  150. return ret;
  151. }
  152. blkcfg >>= 8;
  153. }
  154. state->assigned |= (1 << pipe);
  155. return 0;
  156. }
  157. /* Release SMP blocks for all clients of the pipe */
  158. void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
  159. enum mdp5_pipe pipe)
  160. {
  161. int i;
  162. int cnt = smp->blk_cnt;
  163. for (i = 0; i < pipe2nclients(pipe); i++) {
  164. u32 cid = pipe2client(pipe, i);
  165. void *cs = state->client_state[cid];
  166. /* update global state: */
  167. bitmap_andnot(state->state, state->state, cs, cnt);
  168. /* clear client's state */
  169. bitmap_zero(cs, cnt);
  170. }
  171. state->released |= (1 << pipe);
  172. }
/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
 * happen after scanout completes.
 */
/*
 * Program the hardware SMP_ALLOC registers so that every block set in
 * 'assigned' belongs to client 'cid'.  Returns the number of blocks
 * programmed.  Each 32-bit ALLOC register packs three client-ID fields,
 * so block b lives in register b/3, field b%3.
 */
static unsigned update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int cnt = smp->blk_cnt;
	unsigned nblks = 0;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		/* read-modify-write: patch only this block's field,
		 * preserving the two neighboring clients in the register
		 */
		val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));

		switch (fld) {
		case 0:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		/* write and read allocators are programmed identically */
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);

		nblks++;
	}

	return nblks;
}
  207. void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
  208. {
  209. enum mdp5_pipe pipe;
  210. for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
  211. unsigned i, nblks = 0;
  212. for (i = 0; i < pipe2nclients(pipe); i++) {
  213. u32 cid = pipe2client(pipe, i);
  214. void *cs = state->client_state[cid];
  215. nblks += update_smp_state(smp, cid, cs);
  216. DBG("assign %s:%u, %u blks",
  217. pipe2name(pipe), i, nblks);
  218. }
  219. set_fifo_thresholds(smp, pipe, nblks);
  220. }
  221. state->assigned = 0;
  222. }
/* After scanout has completed, drop the FIFO watermarks to zero for every
 * pipe released since the last commit (the ALLOC regs are not double
 * buffered, so this must not happen earlier -- see note above
 * update_smp_state()).
 */
void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
	enum mdp5_pipe pipe;

	for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
		DBG("release %s", pipe2name(pipe));
		set_fifo_thresholds(smp, pipe, 0);
	}

	state->released = 0;
}
  232. void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
  233. {
  234. struct mdp5_kms *mdp5_kms = get_kms(smp);
  235. struct mdp5_hw_pipe_state *hwpstate;
  236. struct mdp5_smp_state *state;
  237. int total = 0, i, j;
  238. drm_printf(p, "name\tinuse\tplane\n");
  239. drm_printf(p, "----\t-----\t-----\n");
  240. if (drm_can_sleep())
  241. drm_modeset_lock(&mdp5_kms->state_lock, NULL);
  242. /* grab these *after* we hold the state_lock */
  243. hwpstate = &mdp5_kms->state->hwpipe;
  244. state = &mdp5_kms->state->smp;
  245. for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
  246. struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
  247. struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
  248. enum mdp5_pipe pipe = hwpipe->pipe;
  249. for (j = 0; j < pipe2nclients(pipe); j++) {
  250. u32 cid = pipe2client(pipe, j);
  251. void *cs = state->client_state[cid];
  252. int inuse = bitmap_weight(cs, smp->blk_cnt);
  253. drm_printf(p, "%s:%d\t%d\t%s\n",
  254. pipe2name(pipe), j, inuse,
  255. plane ? plane->name : NULL);
  256. total += inuse;
  257. }
  258. }
  259. drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
  260. drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
  261. bitmap_weight(state->state, smp->blk_cnt));
  262. if (drm_can_sleep())
  263. drm_modeset_unlock(&mdp5_kms->state_lock);
  264. }
/* Free the SMP allocator created by mdp5_smp_init(). */
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}
  269. struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
  270. {
  271. struct mdp5_smp_state *state = &mdp5_kms->state->smp;
  272. struct mdp5_smp *smp = NULL;
  273. int ret;
  274. smp = kzalloc(sizeof(*smp), GFP_KERNEL);
  275. if (unlikely(!smp)) {
  276. ret = -ENOMEM;
  277. goto fail;
  278. }
  279. smp->dev = mdp5_kms->dev;
  280. smp->blk_cnt = cfg->mmb_count;
  281. smp->blk_size = cfg->mmb_size;
  282. /* statically tied MMBs cannot be re-allocated: */
  283. bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
  284. memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
  285. return smp;
  286. fail:
  287. if (smp)
  288. mdp5_smp_destroy(smp);
  289. return ERR_PTR(ret);
  290. }