/* memx.c — PMU MEMX script interface (nouveau/nvkm) */
  1. #ifndef __NVKM_PMU_MEMX_H__
  2. #define __NVKM_PMU_MEMX_H__
  3. #include "priv.h"
/* Context for building a MEMX script in the PMU's data segment. */
struct nvkm_memx {
	struct nvkm_pmu *pmu;	/* owning PMU subdev */
	u32 base;		/* data-segment base, from MEMX_INFO_DATA reply[0] */
	u32 size;		/* data-segment size, from MEMX_INFO_DATA reply[1] */
	struct {
		u32 mthd;	/* method of the command currently staged (0 = none) */
		u32 size;	/* number of valid words in data[] */
		u32 data[64];	/* staged payload, flushed by memx_out() */
	} c;			/* command staging cache */
};
  14. static void
  15. memx_out(struct nvkm_memx *memx)
  16. {
  17. struct nvkm_device *device = memx->pmu->subdev.device;
  18. int i;
  19. if (memx->c.mthd) {
  20. nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
  21. for (i = 0; i < memx->c.size; i++)
  22. nvkm_wr32(device, 0x10a1c4, memx->c.data[i]);
  23. memx->c.mthd = 0;
  24. memx->c.size = 0;
  25. }
  26. }
  27. static void
  28. memx_cmd(struct nvkm_memx *memx, u32 mthd, u32 size, u32 data[])
  29. {
  30. if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
  31. (memx->c.mthd && memx->c.mthd != mthd))
  32. memx_out(memx);
  33. memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
  34. memx->c.size += size;
  35. memx->c.mthd = mthd;
  36. }
/* Allocate a MEMX script context and open the PMU data segment for
 * writing.
 *
 * Asks the MEMX process where its data segment lives (reply[0] = base,
 * reply[1] = size), allocates the context, then acquires data-segment
 * access and points the auto-increment port window (0x10a1c0) at the
 * segment base.
 *
 * Returns 0 on success, or a negative error code (from nvkm_pmu_send,
 * or -ENOMEM on allocation failure).
 */
int
nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
{
	struct nvkm_device *device = pmu->subdev.device;
	struct nvkm_memx *memx;
	u32 reply[2];
	int ret;

	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
			    MEMX_INFO_DATA, 0);
	if (ret)
		return ret;

	memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
	if (!memx)
		return -ENOMEM;
	memx->pmu = pmu;
	memx->base = reply[0];
	memx->size = reply[1];

	/* acquire data segment access */
	/* NOTE(review): busy-waits until the PMU latches 0x3 into
	 * 0x10a580 — presumably a request/grant handshake; there is no
	 * timeout, so a wedged PMU would hang here.
	 */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000003);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000003);
	/* 0x01000000 appears to select write mode on the auto-increment
	 * port (nvkm_memx_train_result uses 0x02000000 for reads) —
	 * TODO confirm against PMU firmware docs.
	 */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
	return 0;
}
  61. int
  62. nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
  63. {
  64. struct nvkm_memx *memx = *pmemx;
  65. struct nvkm_pmu *pmu = memx->pmu;
  66. struct nvkm_subdev *subdev = &pmu->subdev;
  67. struct nvkm_device *device = subdev->device;
  68. u32 finish, reply[2];
  69. /* flush the cache... */
  70. memx_out(memx);
  71. /* release data segment access */
  72. finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
  73. nvkm_wr32(device, 0x10a580, 0x00000000);
  74. /* call MEMX process to execute the script, and wait for reply */
  75. if (exec) {
  76. nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
  77. memx->base, finish);
  78. }
  79. nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
  80. reply[0], reply[1]);
  81. kfree(memx);
  82. return 0;
  83. }
  84. void
  85. nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
  86. {
  87. nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
  88. memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
  89. }
  90. void
  91. nvkm_memx_wait(struct nvkm_memx *memx,
  92. u32 addr, u32 mask, u32 data, u32 nsec)
  93. {
  94. nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
  95. addr, mask, data, nsec);
  96. memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
  97. memx_out(memx); /* fuc can't handle multiple */
  98. }
  99. void
  100. nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
  101. {
  102. nvkm_debug(&memx->pmu->subdev, " DELAY = %d ns\n", nsec);
  103. memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
  104. memx_out(memx); /* fuc can't handle multiple */
  105. }
/* Append a wait-for-vblank command, synced to the active head with the
 * largest resolution.  If no active head is found (or the chipset is
 * >= 0xd0, which this scan does not handle), the command is skipped.
 */
void
nvkm_memx_wait_vblank(struct nvkm_memx *memx)
{
	struct nvkm_subdev *subdev = &memx->pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 heads, x, y, px = 0;
	int i, head_sync;

	/* NOTE(review): only chipsets < 0xd0 are scanned; on newer
	 * hardware px stays 0 and we bail out below — presumably the
	 * display registers moved. TODO confirm.
	 */
	if (device->chipset < 0xd0) {
		heads = nvkm_rd32(device, 0x610050);
		for (i = 0; i < 2; i++) {
			/* Heuristic: sync to head with biggest resolution */
			if (heads & (2 << (i << 3))) {
				/* packed resolution: y in the high half,
				 * x in the low half
				 */
				x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
				y = (x & 0xffff0000) >> 16;
				x &= 0x0000ffff;
				if ((x * y) > px) {
					px = (x * y);
					head_sync = i;
				}
			}
		}
	}
	if (px == 0) {
		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
		return;
	}
	/* head_sync is always assigned on any path where px != 0 */
	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
	memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
	memx_out(memx); /* fuc can't handle multiple */
}
  136. void
  137. nvkm_memx_train(struct nvkm_memx *memx)
  138. {
  139. nvkm_debug(&memx->pmu->subdev, " MEM TRAIN\n");
  140. memx_cmd(memx, MEMX_TRAIN, 0, NULL);
  141. }
  142. int
  143. nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
  144. {
  145. struct nvkm_device *device = pmu->subdev.device;
  146. u32 reply[2], base, size, i;
  147. int ret;
  148. ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
  149. MEMX_INFO_TRAIN, 0);
  150. if (ret)
  151. return ret;
  152. base = reply[0];
  153. size = reply[1] >> 2;
  154. if (size > rsize)
  155. return -ENOMEM;
  156. /* read the packet */
  157. nvkm_wr32(device, 0x10a1c0, 0x02000000 | base);
  158. for (i = 0; i < size; i++)
  159. res[i] = nvkm_rd32(device, 0x10a1c4);
  160. return 0;
  161. }
  162. void
  163. nvkm_memx_block(struct nvkm_memx *memx)
  164. {
  165. nvkm_debug(&memx->pmu->subdev, " HOST BLOCKED\n");
  166. memx_cmd(memx, MEMX_ENTER, 0, NULL);
  167. }
  168. void
  169. nvkm_memx_unblock(struct nvkm_memx *memx)
  170. {
  171. nvkm_debug(&memx->pmu->subdev, " HOST UNBLOCKED\n");
  172. memx_cmd(memx, MEMX_LEAVE, 0, NULL);
  173. }
  174. #endif