/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
  24. #include "priv.h"
  25. #include <core/msgqueue.h>
  26. #include <subdev/timer.h>
  27. void
  28. nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
  29. {
  30. if (pmu && pmu->func->pgob)
  31. pmu->func->pgob(pmu, enable);
  32. }
  33. static void
  34. nvkm_pmu_recv(struct work_struct *work)
  35. {
  36. struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
  37. return pmu->func->recv(pmu);
  38. }
  39. int
  40. nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
  41. u32 process, u32 message, u32 data0, u32 data1)
  42. {
  43. if (!pmu || !pmu->func->send)
  44. return -ENODEV;
  45. return pmu->func->send(pmu, reply, process, message, data0, data1);
  46. }
  47. static void
  48. nvkm_pmu_intr(struct nvkm_subdev *subdev)
  49. {
  50. struct nvkm_pmu *pmu = nvkm_pmu(subdev);
  51. if (!pmu->func->intr)
  52. return;
  53. pmu->func->intr(pmu);
  54. }
  55. static int
  56. nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
  57. {
  58. struct nvkm_pmu *pmu = nvkm_pmu(subdev);
  59. if (pmu->func->fini)
  60. pmu->func->fini(pmu);
  61. flush_work(&pmu->recv.work);
  62. return 0;
  63. }
/*
 * Put the PMU falcon through a reset sequence and wait for it to become
 * usable again.  Always returns 0; the waits are bounded (2ms) and any
 * timeout is silently tolerated.
 */
static int
nvkm_pmu_reset(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;

	/*
	 * NOTE(review): 0x000200 appears to be the master enable register —
	 * if the PMU's bit is clear the unit is disabled and there is
	 * nothing to reset.  Confirm against the hardware documentation.
	 */
	if (!(nvkm_rd32(device, 0x000200) & 0x00002000))
		return 0;

	/* Inhibit interrupts, and wait for idle. */
	nvkm_wr32(device, 0x10a014, 0x0000ffff);
	nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x10a04c))
			break;
	);

	/* Reset, via the hardware-specific hook when one exists. */
	if (pmu->func->reset)
		pmu->func->reset(pmu);

	/* Wait for IMEM/DMEM scrubbing to be complete. */
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
			break;
	);

	return 0;
}
  86. static int
  87. nvkm_pmu_preinit(struct nvkm_subdev *subdev)
  88. {
  89. struct nvkm_pmu *pmu = nvkm_pmu(subdev);
  90. return nvkm_pmu_reset(pmu);
  91. }
  92. static int
  93. nvkm_pmu_init(struct nvkm_subdev *subdev)
  94. {
  95. struct nvkm_pmu *pmu = nvkm_pmu(subdev);
  96. int ret = nvkm_pmu_reset(pmu);
  97. if (ret == 0 && pmu->func->init)
  98. ret = pmu->func->init(pmu);
  99. return ret;
  100. }
  101. static int
  102. nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
  103. {
  104. struct nvkm_pmu *pmu = nvkm_pmu(subdev);
  105. return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
  106. }
  107. static void *
  108. nvkm_pmu_dtor(struct nvkm_subdev *subdev)
  109. {
  110. struct nvkm_pmu *pmu = nvkm_pmu(subdev);
  111. nvkm_msgqueue_del(&pmu->queue);
  112. nvkm_falcon_del(&pmu->falcon);
  113. return nvkm_pmu(subdev);
  114. }
/*
 * Generic subdev hooks for the PMU; hardware-specific behaviour is reached
 * through pmu->func from within each of these entry points.
 */
static const struct nvkm_subdev_func
nvkm_pmu = {
	.dtor = nvkm_pmu_dtor,
	.preinit = nvkm_pmu_preinit,
	.oneinit = nvkm_pmu_oneinit,
	.init = nvkm_pmu_init,
	.fini = nvkm_pmu_fini,
	.intr = nvkm_pmu_intr,
};
  124. int
  125. nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
  126. int index, struct nvkm_pmu *pmu)
  127. {
  128. nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
  129. pmu->func = func;
  130. INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
  131. init_waitqueue_head(&pmu->recv.wait);
  132. return 0;
  133. }
  134. int
  135. nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
  136. int index, struct nvkm_pmu **ppmu)
  137. {
  138. struct nvkm_pmu *pmu;
  139. if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
  140. return -ENOMEM;
  141. return nvkm_pmu_ctor(func, device, index, *ppmu);
  142. }