/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/timer.h>
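
/* Message-passing overview, summarised from the code below (register names
 * and roles are inferred from usage here, not from official documentation):
 * the host and the PMU falcon exchange fixed-size packets of four 32-bit
 * words (process, message, data0, data1) through rings of eight 16-byte
 * slots in PMU data memory.  The host->PMU ring is driven by a host PUT
 * pointer at 0x10a4a0 against the falcon's GET pointer at 0x10a4b0; the
 * PMU->host ring uses PUT 0x10a4c8 / GET 0x10a4cc.  The pointers count
 * modulo 16 so that a full ring (GET == PUT ^ 8) can be told apart from an
 * empty one (GET == PUT).  Each ring's base/size is advertised by the
 * firmware through 0x10a4d0 and 0x10a4dc once it has booted.
 */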

/* chipset-specific hook to enable/disable PGOB (power-gating engaged at
 * boot), where the hardware has it
 */
void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
        if (pmu->func->pgob)
                pmu->func->pgob(pmu, enable);
}
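
/* Usage sketch for nvkm_pmu_send() below (illustrative only; PMU_PROC_HOST
 * and PMU_MSG_PING are hypothetical identifiers, not defined by this file):
 *
 *      u32 reply[2];
 *      int ret = nvkm_pmu_send(pmu, reply, PMU_PROC_HOST, PMU_MSG_PING,
 *                              0x00000000, 0x00000000);
 *
 * On success the PMU's data0/data1 response words land in reply[0] and
 * reply[1].  Pass reply == NULL to fire-and-forget without blocking.
 */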

int
nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
              u32 process, u32 message, u32 data0, u32 data1)
{
        struct nvkm_subdev *subdev = &pmu->subdev;
        struct nvkm_device *device = subdev->device;
        u32 addr;

        /* wait for a free slot in the fifo */
        addr = nvkm_rd32(device, 0x10a4a0);
        if (nvkm_msec(device, 2000,
                u32 tmp = nvkm_rd32(device, 0x10a4b0);
                if (tmp != (addr ^ 8))
                        break;
        ) < 0)
                return -EBUSY;

        /* we currently only support a single process at a time waiting
         * on a synchronous reply, so take the PMU mutex and tell the
         * receive handler what we're waiting for
         */
        if (reply) {
                mutex_lock(&subdev->mutex);
                pmu->recv.message = message;
                pmu->recv.process = process;
        }

        /* acquire data segment access */
        do {
                nvkm_wr32(device, 0x10a580, 0x00000001);
        } while (nvkm_rd32(device, 0x10a580) != 0x00000001);

        /* write the packet through the auto-incrementing DMEM port */
        nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
                                    pmu->send.base));
        nvkm_wr32(device, 0x10a1c4, process);
        nvkm_wr32(device, 0x10a1c4, message);
        nvkm_wr32(device, 0x10a1c4, data0);
        nvkm_wr32(device, 0x10a1c4, data1);
        nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f); /* bump PUT */

        /* release data segment access */
        nvkm_wr32(device, 0x10a580, 0x00000000);

        /* wait for reply, if requested */
        if (reply) {
                wait_event(pmu->recv.wait, (pmu->recv.process == 0));
                reply[0] = pmu->recv.data[0];
                reply[1] = pmu->recv.data[1];
                mutex_unlock(&subdev->mutex);
        }

        return 0;
}

static void
nvkm_pmu_recv(struct work_struct *work)
{
        struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
        struct nvkm_subdev *subdev = &pmu->subdev;
        struct nvkm_device *device = subdev->device;
        u32 process, message, data0, data1;

        /* nothing to do if GET == PUT */
        u32 addr = nvkm_rd32(device, 0x10a4cc);
        if (addr == nvkm_rd32(device, 0x10a4c8))
                return;

        /* acquire data segment access */
        do {
                nvkm_wr32(device, 0x10a580, 0x00000002);
        } while (nvkm_rd32(device, 0x10a580) != 0x00000002);

        /* read the packet through the auto-incrementing DMEM port */
        nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
                                    pmu->recv.base));
        process = nvkm_rd32(device, 0x10a1c4);
        message = nvkm_rd32(device, 0x10a1c4);
        data0 = nvkm_rd32(device, 0x10a1c4);
        data1 = nvkm_rd32(device, 0x10a1c4);
        nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f); /* bump GET */

        /* release data segment access */
        nvkm_wr32(device, 0x10a580, 0x00000000);

        /* wake process if it's waiting on a synchronous reply */
        if (pmu->recv.process) {
                if (process == pmu->recv.process &&
                    message == pmu->recv.message) {
                        pmu->recv.data[0] = data0;
                        pmu->recv.data[1] = data1;
                        pmu->recv.process = 0;
                        wake_up(&pmu->recv.wait);
                        return;
                }
        }

        /* right now there are no other expected responses from the engine,
         * so assume that any unexpected message is an error.
         */
        nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
                  (char)((process & 0x000000ff) >>  0),
                  (char)((process & 0x0000ff00) >>  8),
                  (char)((process & 0x00ff0000) >> 16),
                  (char)((process & 0xff000000) >> 24),
                  process, message, data0, data1);
}

static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
        struct nvkm_pmu *pmu = nvkm_pmu(subdev);
        struct nvkm_device *device = pmu->subdev.device;
        u32 disp = nvkm_rd32(device, 0x10a01c);
        u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);

        /* falcon reported a UAS fault: log and clear the status */
        if (intr & 0x00000020) {
                u32 stat = nvkm_rd32(device, 0x10a16c);
                if (stat & 0x80000000) {
                        nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
                                   stat & 0x00ffffff,
                                   nvkm_rd32(device, 0x10a168));
                        nvkm_wr32(device, 0x10a16c, 0x00000000);
                        intr &= ~0x00000020;
                }
        }

        /* message pending from the PMU: process it outside irq context */
        if (intr & 0x00000040) {
                schedule_work(&pmu->recv.work);
                nvkm_wr32(device, 0x10a004, 0x00000040);
                intr &= ~0x00000040;
        }

        /* debug interrupt: log the wr32 address/value pair reported
         * by the firmware
         */
        if (intr & 0x00000080) {
                nvkm_info(subdev, "wr32 %06x %08x\n",
                          nvkm_rd32(device, 0x10a7a0),
                          nvkm_rd32(device, 0x10a7a4));
                nvkm_wr32(device, 0x10a004, 0x00000080);
                intr &= ~0x00000080;
        }

        /* ack and complain about anything we don't understand */
        if (intr) {
                nvkm_error(subdev, "intr %08x\n", intr);
                nvkm_wr32(device, 0x10a004, intr);
        }
}
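
/* Suspend/unload path (annotation added for clarity): this appears to mask
 * the fault and message interrupt enables (INTR_EN_CLR = 0x60, matching the
 * 0x20/0x40 sources handled above) and then drains any recv work that may
 * already have been scheduled before returning.
 */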

static int
nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
        struct nvkm_pmu *pmu = nvkm_pmu(subdev);
        struct nvkm_device *device = pmu->subdev.device;

        nvkm_wr32(device, 0x10a014, 0x00000060);
        flush_work(&pmu->recv.work);
        return 0;
}

static int
nvkm_pmu_init(struct nvkm_subdev *subdev)
{
        struct nvkm_pmu *pmu = nvkm_pmu(subdev);
        struct nvkm_device *device = pmu->subdev.device;
        int i;

        /* prevent previous ucode from running, wait for idle, reset */
        nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
        nvkm_msec(device, 2000,
                if (!nvkm_rd32(device, 0x10a04c))
                        break;
        );
        nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
        nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
        nvkm_rd32(device, 0x000200);
        nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
                        break;
        );

        /* upload data segment */
        nvkm_wr32(device, 0x10a1c0, 0x01000000);
        for (i = 0; i < pmu->func->data.size / 4; i++)
                nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);

        /* upload code segment */
        nvkm_wr32(device, 0x10a180, 0x01000000);
        for (i = 0; i < pmu->func->code.size / 4; i++) {
                if ((i & 0x3f) == 0) /* start of a new 256-byte code page */
                        nvkm_wr32(device, 0x10a188, i >> 6);
                nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
        }

        /* start it running */
        nvkm_wr32(device, 0x10a10c, 0x00000000);
        nvkm_wr32(device, 0x10a104, 0x00000000);
        nvkm_wr32(device, 0x10a100, 0x00000002);

        /* wait for valid host->pmu ring configuration */
        if (nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x10a4d0))
                        break;
        ) < 0)
                return -EBUSY;
        pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
        pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

        /* wait for valid pmu->host ring configuration */
        if (nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x10a4dc))
                        break;
        ) < 0)
                return -EBUSY;
        pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
        pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;

        /* enable the interrupts handled by nvkm_pmu_intr() */
        nvkm_wr32(device, 0x10a010, 0x000000e0);
        return 0;
}

static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
        /* nothing of our own to unwind; hand the object back for the
         * core to free
         */
        return nvkm_pmu(subdev);
}

static const struct nvkm_subdev_func
nvkm_pmu = {
        .dtor = nvkm_pmu_dtor,
        .init = nvkm_pmu_init,
        .fini = nvkm_pmu_fini,
        .intr = nvkm_pmu_intr,
};
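
/* Constructor shared by the chipset-specific PMU implementations.  A backend
 * would typically wrap it along these lines (gxxx_pmu_new and gxxx_pmu are
 * hypothetical names, for illustration only):
 *
 *      int
 *      gxxx_pmu_new(struct nvkm_device *device, int index,
 *                   struct nvkm_pmu **ppmu)
 *      {
 *              return nvkm_pmu_new_(&gxxx_pmu, device, index, ppmu);
 *      }
 *
 * where gxxx_pmu is that chipset's struct nvkm_pmu_func.
 */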

int
nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
              int index, struct nvkm_pmu **ppmu)
{
        struct nvkm_pmu *pmu;

        if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
                return -ENOMEM;

        nvkm_subdev_ctor(&nvkm_pmu, device, index, 0, &pmu->subdev);
        pmu->func = func;
        INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
        init_waitqueue_head(&pmu->recv.wait);
        return 0;
}