mdp5_smp.c

/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"
#include "mdp5_smp.h"
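
/*
 * SMP - Shared Memory Pool: fixed-size memory blocks (MMBs) handed out
 * on demand to the pipes' fetch clients. The usual flow is roughly:
 *
 *   mdp5_smp_calculate()       - how many MMBs each plane of a fb needs
 *   mdp5_smp_assign()          - reserve those MMBs in the atomic SMP state
 *   mdp5_smp_prepare_commit()  - program the new allocation to hardware
 *   mdp5_smp_release()         - mark a pipe's MMBs for release
 *   mdp5_smp_complete_commit() - return released MMBs once scanout moves on
 */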

struct mdp5_smp {
	struct drm_device *dev;

	uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */

	int blk_cnt;
	int blk_size;

	/* register cache */
	u32 alloc_w[22];
	u32 alloc_r[22];
	u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
	u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
	u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
	struct msm_drm_private *priv = smp->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED	0

	if (WARN_ON(plane >= pipe2nclients(pipe)))
		return CID_UNUSED;

	/*
	 * Note on SMP clients:
	 * For ViG pipes, fetch Y/Cr/Cb-components clients are always
	 * consecutive, and in that order.
	 *
	 * e.g.:
	 * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
	 *	Y  plane's client ID is N
	 *	Cr plane's client ID is N + 1
	 *	Cb plane's client ID is N + 2
	 */
	return mdp5_cfg->smp.clients[pipe] + plane;
}

/* allocate blocks for the specified request: */
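/*
 * Statically reserved MMBs are counted against the request first, so
 * e.g. (illustratively) reserved = 2 and nblks = 6 takes only 4 MMBs
 * out of the shared pool bitmap.
 */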
static int smp_request_block(struct mdp5_smp *smp,
		struct mdp5_smp_state *state,
		u32 cid, int nblks)
{
	void *cs = state->client_state[cid];
	int i, avail, cnt = smp->blk_cnt;
	uint8_t reserved;

	/* we shouldn't be requesting blocks for an in-use client: */
	WARN_ON(bitmap_weight(cs, cnt) > 0);

	reserved = smp->reserved[cid];

	if (reserved) {
		nblks = max(0, nblks - reserved);
		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
	}

	avail = cnt - bitmap_weight(state->state, cnt);
	if (nblks > avail) {
		dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
				nblks, avail);
		return -ENOSPC;
	}

	for (i = 0; i < nblks; i++) {
		int blk = find_first_zero_bit(state->state, cnt);

		set_bit(blk, cs);
		set_bit(blk, state->state);
	}

	return 0;
}

static void set_fifo_thresholds(struct mdp5_smp *smp,
		enum mdp5_pipe pipe, int nblks)
{
	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
	u32 val;

	/* 1/4 of SMP pool that is being fetched */
	val = (nblks * smp_entries_per_blk) / 4;

	smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
	smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
	smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
}
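
/*
 * E.g. (illustrative numbers): with 4096 byte MMBs and 128-bit SMP
 * entries, smp_entries_per_blk = 4096 / 16 = 256. A pipe fetching
 * from 2 MMBs gets val = (2 * 256) / 4 = 128, placing the REQPRIO
 * watermarks at 1/4, 2/4 and 3/4 of its share of the pool:
 * wm0 = 128, wm1 = 256, wm2 = 384 entries.
 */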

/*
 * NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width. Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
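/*
 * The returned blkcfg packs one byte per plane: plane 0's MMB count in
 * bits 7:0, plane 1's in bits 15:8, and so on. E.g. (illustratively)
 * XRGB8888 at width = 1920 on hw with 4096 byte MMBs: cpp = 4, so
 * fetch_stride = 7680, and with nlines = 2 that is
 * DIV_ROUND_UP(15360, 4096) = 4 MMBs => blkcfg = 0x04.
 */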
uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
		const struct mdp_format *format,
		u32 width, bool hdecim)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines;
	u32 fmt = format->base.pixel_format;
	uint32_t blkcfg = 0;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	/* Newer MDPs have split/packing logic, which fetches sub-sampled
	 * U and V components (splits them from Y if necessary) and packs
	 * them together, writes to SMP using a single client.
	 */
	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
		fmt = DRM_FORMAT_NV24;
		nplanes = 2;

		/* if decimation is enabled, HW decimates less on the
		 * sub sampled chroma components
		 */
		if (hdecim && (hsub > 1))
			hsub = 1;
	}

	for (i = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);

		/* for hw rev v1.00 */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		blkcfg |= (n << (8 * i));
	}

	return blkcfg;
}

int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
		enum mdp5_pipe pipe, uint32_t blkcfg)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		int n = blkcfg & 0xff;

		if (!n)
			continue;

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, state, cid, n);
		if (ret) {
			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		blkcfg >>= 8;
	}

	state->assigned |= (1 << pipe);

	return 0;
}

/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
		enum mdp5_pipe pipe)
{
	int i;
	int cnt = smp->blk_cnt;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		void *cs = state->client_state[cid];

		/* update global state: */
		bitmap_andnot(state->state, state->state, cs, cnt);

		/* clear client's state */
		bitmap_zero(cs, cnt);
	}

	state->released |= (1 << pipe);
}

/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
 * happen after scanout completes.
 */
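/* Hence the two-phase commit below: mdp5_smp_prepare_commit() programs
 * newly assigned MMBs when the new state is committed, while
 * mdp5_smp_complete_commit() only returns released MMBs once the old
 * frame has actually gone out.
 */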
static unsigned update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	int cnt = smp->blk_cnt;
	unsigned nblks = 0;
	u32 blk, val;
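
	/*
	 * Each 32-bit ALLOC register carries the client IDs of three
	 * consecutive MMBs (the CLIENT0/1/2 fields), hence the divide
	 * and modulo by 3 below.
	 */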
	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		val = smp->alloc_w[idx];

		switch (fld) {
		case 0:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		smp->alloc_w[idx] = val;
		smp->alloc_r[idx] = val;

		nblks++;
	}

	return nblks;
}
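
/*
 * E.g. (illustratively) with blk_cnt = 22 and three client fields per
 * register, this writes blk_cnt / 3 + 1 = 8 ALLOC_W/ALLOC_R register
 * pairs, covering every MMB in the pool.
 */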
static void write_smp_alloc_regs(struct mdp5_smp *smp)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int i, num_regs;

	num_regs = smp->blk_cnt / 3 + 1;

	for (i = 0; i < num_regs; i++) {
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
			   smp->alloc_w[i]);
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
			   smp->alloc_r[i]);
	}
}

static void write_smp_fifo_regs(struct mdp5_smp *smp)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int i;

	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		enum mdp5_pipe pipe = hwpipe->pipe;

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
			   smp->pipe_reqprio_fifo_wm0[pipe]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
			   smp->pipe_reqprio_fifo_wm1[pipe]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
			   smp->pipe_reqprio_fifo_wm2[pipe]);
	}
}

void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
	enum mdp5_pipe pipe;

	for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
		unsigned i, nblks = 0;

		for (i = 0; i < pipe2nclients(pipe); i++) {
			u32 cid = pipe2client(pipe, i);
			void *cs = state->client_state[cid];

			nblks += update_smp_state(smp, cid, cs);

			DBG("assign %s:%u, %u blks",
				pipe2name(pipe), i, nblks);
		}

		set_fifo_thresholds(smp, pipe, nblks);
	}

	write_smp_alloc_regs(smp);
	write_smp_fifo_regs(smp);

	state->assigned = 0;
}

void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
	enum mdp5_pipe pipe;

	for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
		DBG("release %s", pipe2name(pipe));
		set_fifo_thresholds(smp, pipe, 0);
	}

	write_smp_fifo_regs(smp);

	state->released = 0;
}

void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct mdp5_hw_pipe_state *hwpstate;
	struct mdp5_smp_state *state;
	struct mdp5_global_state *global_state;
	int total = 0, i, j;

	drm_printf(p, "name\tinuse\tplane\n");
	drm_printf(p, "----\t-----\t-----\n");

	if (drm_can_sleep())
		drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	/* grab these *after* we hold the state_lock */
	hwpstate = &global_state->hwpipe;
	state = &global_state->smp;

	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
		enum mdp5_pipe pipe = hwpipe->pipe;

		for (j = 0; j < pipe2nclients(pipe); j++) {
			u32 cid = pipe2client(pipe, j);
			void *cs = state->client_state[cid];
			int inuse = bitmap_weight(cs, smp->blk_cnt);

			drm_printf(p, "%s:%d\t%d\t%s\n",
				pipe2name(pipe), j, inuse,
				plane ? plane->name : NULL);

			total += inuse;
		}
	}

	drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
	drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
			bitmap_weight(state->state, smp->blk_cnt));

	if (drm_can_sleep())
		drm_modeset_unlock(&mdp5_kms->glob_state_lock);
}

void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}

struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
	struct mdp5_smp_state *state;
	struct mdp5_global_state *global_state;
	struct mdp5_smp *smp = NULL;
	int ret;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (unlikely(!smp)) {
		ret = -ENOMEM;
		goto fail;
	}

	smp->dev = mdp5_kms->dev;
	smp->blk_cnt = cfg->mmb_count;
	smp->blk_size = cfg->mmb_size;

	global_state = mdp5_get_existing_global_state(mdp5_kms);
	state = &global_state->smp;

	/* statically tied MMBs cannot be re-allocated: */
	bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);

	memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));

	return smp;
fail:
	if (smp)
		mdp5_smp_destroy(smp);

	return ERR_PTR(ret);
}