/* mdp5_smp.c */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "mdp5_kms.h"
#include "mdp5_smp.h"
/*
 * SMP - Shared Memory Pool
 *
 * These are shared between all the clients, where each plane in a
 * scanout buffer is an SMP client.  I.e. scanout of a 3-plane I420
 * buffer on pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
 *
 * Based on the size of the attached scanout buffer, a certain # of
 * blocks must be allocated to that client out of the shared pool.
 *
 * Each block can be either free, or pending/in-use by a client.
 * The updates happen in three steps:
 *
 *  1) mdp5_smp_request():
 *     When plane scanout is set up, calculate the required number of
 *     blocks needed per client, and request them.  Blocks not in use
 *     or pending by any other client are added to the client's
 *     pending set.
 *
 *  2) mdp5_smp_configure():
 *     As hw is programmed, before FLUSH, the MDP5_SMP_ALLOC registers
 *     are configured for the union(pending, inuse).
 *
 *  3) mdp5_smp_commit():
 *     After the next vblank, copy pending -> inuse.  Optionally update
 *     MDP5_SMP_ALLOC registers if there are newly unused blocks.
 *
 * On the next vblank after changes have been committed to hw, the
 * client's pending blocks become its in-use blocks (and no-longer
 * in-use blocks become available to other clients).
 *
 * Btw, hurray for confusing overloaded acronyms!  :-/
 *
 * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
 * should happen at (or before?) atomic->check().  And we'd need
 * an API to discard previous requests if the update is aborted or
 * test-only.
 *
 * TODO: it would perhaps be nice to have debugfs to dump out kernel
 * in-use and pending state of all clients.
 */
  59. static DEFINE_SPINLOCK(smp_lock);
  60. /* step #1: update # of blocks pending for the client: */
  61. int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
  62. enum mdp5_client_id cid, int nblks)
  63. {
  64. struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
  65. int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
  66. unsigned long flags;
  67. spin_lock_irqsave(&smp_lock, flags);
  68. avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
  69. if (nblks > avail) {
  70. ret = -ENOSPC;
  71. goto fail;
  72. }
  73. cur_nblks = bitmap_weight(ps->pending, cnt);
  74. if (nblks > cur_nblks) {
  75. /* grow the existing pending reservation: */
  76. for (i = cur_nblks; i < nblks; i++) {
  77. int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
  78. set_bit(blk, ps->pending);
  79. set_bit(blk, mdp5_kms->smp_state);
  80. }
  81. } else {
  82. /* shrink the existing pending reservation: */
  83. for (i = cur_nblks; i > nblks; i--) {
  84. int blk = find_first_bit(ps->pending, cnt);
  85. clear_bit(blk, ps->pending);
  86. /* don't clear in global smp_state until _commit() */
  87. }
  88. }
  89. fail:
  90. spin_unlock_irqrestore(&smp_lock, flags);
  91. return 0;
  92. }
  93. static void update_smp_state(struct mdp5_kms *mdp5_kms,
  94. enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
  95. {
  96. int cnt = mdp5_kms->smp_blk_cnt;
  97. uint32_t blk, val;
  98. for_each_set_bit(blk, *assigned, cnt) {
  99. int idx = blk / 3;
  100. int fld = blk % 3;
  101. val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
  102. switch (fld) {
  103. case 0:
  104. val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
  105. val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
  106. break;
  107. case 1:
  108. val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
  109. val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
  110. break;
  111. case 2:
  112. val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
  113. val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
  114. break;
  115. }
  116. mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
  117. mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
  118. }
  119. }
  120. /* step #2: configure hw for union(pending, inuse): */
  121. void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
  122. {
  123. struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
  124. int cnt = mdp5_kms->smp_blk_cnt;
  125. mdp5_smp_state_t assigned;
  126. bitmap_or(assigned, ps->inuse, ps->pending, cnt);
  127. update_smp_state(mdp5_kms, cid, &assigned);
  128. }
  129. /* step #3: after vblank, copy pending -> inuse: */
  130. void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
  131. {
  132. struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
  133. int cnt = mdp5_kms->smp_blk_cnt;
  134. mdp5_smp_state_t released;
  135. /*
  136. * Figure out if there are any blocks we where previously
  137. * using, which can be released and made available to other
  138. * clients:
  139. */
  140. if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
  141. unsigned long flags;
  142. spin_lock_irqsave(&smp_lock, flags);
  143. /* clear released blocks: */
  144. bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
  145. released, cnt);
  146. spin_unlock_irqrestore(&smp_lock, flags);
  147. update_smp_state(mdp5_kms, CID_UNUSED, &released);
  148. }
  149. bitmap_copy(ps->inuse, ps->pending, cnt);
  150. }