/* gk20a.c — GK20A (Tegra K1) PMU subdev with software DVFS for nouveau/nvkm */
  1. /*
  2. * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  18. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  19. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  20. * DEALINGS IN THE SOFTWARE.
  21. */
  22. #define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base.subdev)
  23. #include "priv.h"
  24. #include <subdev/clk.h>
  25. #include <subdev/timer.h>
  26. #include <subdev/volt.h>
  27. #define BUSY_SLOT 0
  28. #define CLK_SLOT 7
/* Tunables and running state for the GK20A DVFS governor. */
struct gk20a_pmu_dvfs_data {
	int p_load_target;	/* load (%) the governor steers toward */
	int p_load_max;		/* load (%) above which we jump several levels at once */
	int p_smooth;		/* smoothing weight: average spans p_smooth + 1 samples */
	unsigned int avg_load;	/* exponentially-smoothed load (%), updated each sample */
};
/* GK20A PMU instance: common PMU plus the periodic DVFS machinery. */
struct gk20a_pmu {
	struct nvkm_pmu base;		/* common nvkm PMU subdev state */
	struct nvkm_alarm alarm;	/* timer alarm driving gk20a_pmu_dvfs_work */
	struct gk20a_pmu_dvfs_data *data;	/* DVFS tunables (points at gk20a_dvfs_data) */
};
/* One sample of the PMU performance counters used to estimate GPU load. */
struct gk20a_pmu_dvfs_dev_status {
	unsigned long total;	/* total cycles read from the CLK_SLOT counter */
	unsigned long busy;	/* busy cycles read from the BUSY_SLOT counter */
	int cur_state;		/* NOTE(review): not written by any code visible here */
};
  45. static int
  46. gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
  47. {
  48. struct nvkm_clk *clk = pmu->base.subdev.device->clk;
  49. return nvkm_clk_astate(clk, *state, 0, false);
  50. }
  51. static int
  52. gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
  53. {
  54. struct nvkm_clk *clk = pmu->base.subdev.device->clk;
  55. *state = clk->pstate;
  56. return 0;
  57. }
  58. static int
  59. gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
  60. int *state, int load)
  61. {
  62. struct gk20a_pmu_dvfs_data *data = pmu->data;
  63. struct nvkm_clk *clk = pmu->base.subdev.device->clk;
  64. int cur_level, level;
  65. /* For GK20A, the performance level is directly mapped to pstate */
  66. level = cur_level = clk->pstate;
  67. if (load > data->p_load_max) {
  68. level = min(clk->state_nr - 1, level + (clk->state_nr / 3));
  69. } else {
  70. level += ((load - data->p_load_target) * 10 /
  71. data->p_load_target) / 2;
  72. level = max(0, level);
  73. level = min(clk->state_nr - 1, level);
  74. }
  75. nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
  76. cur_level, level);
  77. *state = level;
  78. if (level == cur_level)
  79. return 0;
  80. else
  81. return 1;
  82. }
  83. static int
  84. gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
  85. struct gk20a_pmu_dvfs_dev_status *status)
  86. {
  87. struct nvkm_device *device = pmu->base.subdev.device;
  88. status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10));
  89. status->total= nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10));
  90. return 0;
  91. }
  92. static void
  93. gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
  94. {
  95. struct nvkm_device *device = pmu->base.subdev.device;
  96. nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
  97. nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
  98. }
  99. static void
  100. gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
  101. {
  102. struct gk20a_pmu *pmu =
  103. container_of(alarm, struct gk20a_pmu, alarm);
  104. struct gk20a_pmu_dvfs_data *data = pmu->data;
  105. struct gk20a_pmu_dvfs_dev_status status;
  106. struct nvkm_subdev *subdev = &pmu->base.subdev;
  107. struct nvkm_device *device = subdev->device;
  108. struct nvkm_clk *clk = device->clk;
  109. struct nvkm_timer *tmr = device->timer;
  110. struct nvkm_volt *volt = device->volt;
  111. u32 utilization = 0;
  112. int state, ret;
  113. /*
  114. * The PMU is initialized before CLK and VOLT, so we have to make sure the
  115. * CLK and VOLT are ready here.
  116. */
  117. if (!clk || !volt)
  118. goto resched;
  119. ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
  120. if (ret) {
  121. nvkm_warn(subdev, "failed to get device status\n");
  122. goto resched;
  123. }
  124. if (status.total)
  125. utilization = div_u64((u64)status.busy * 100, status.total);
  126. data->avg_load = (data->p_smooth * data->avg_load) + utilization;
  127. data->avg_load /= data->p_smooth + 1;
  128. nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
  129. utilization, data->avg_load);
  130. ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
  131. if (ret) {
  132. nvkm_warn(subdev, "failed to get current state\n");
  133. goto resched;
  134. }
  135. if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
  136. nvkm_trace(subdev, "set new state to %d\n", state);
  137. gk20a_pmu_dvfs_target(pmu, &state);
  138. }
  139. resched:
  140. gk20a_pmu_dvfs_reset_dev_status(pmu);
  141. nvkm_timer_alarm(tmr, 100000000, alarm);
  142. }
  143. static int
  144. gk20a_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
  145. {
  146. struct gk20a_pmu *pmu = gk20a_pmu(subdev);
  147. nvkm_timer_alarm_cancel(subdev->device->timer, &pmu->alarm);
  148. return 0;
  149. }
  150. static void *
  151. gk20a_pmu_dtor(struct nvkm_subdev *subdev)
  152. {
  153. return gk20a_pmu(subdev);
  154. }
/*
 * Subdev init: program the PMU performance counters and kick off the
 * self-re-arming DVFS worker (first run after 2 s).
 */
static int
gk20a_pmu_init(struct nvkm_subdev *subdev)
{
	struct gk20a_pmu *pmu = gk20a_pmu(subdev);
	struct nvkm_device *device = pmu->base.subdev.device;

	/* init pwr perf counter */
	/* NOTE(review): 0x10a504/0x10a50c configure the PMU idle counters;
	 * the exact meaning of these magic values is not visible here —
	 * confirm against the PMU register documentation. */
	nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
	nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
	nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);

	/* Schedule the first DVFS sample; the worker re-arms itself. */
	nvkm_timer_alarm(device->timer, 2000000000, &pmu->alarm);
	return 0;
}
/* Default DVFS tunables shared by all GK20A PMU instances. */
static struct gk20a_pmu_dvfs_data
gk20a_dvfs_data= {
	.p_load_target = 70,	/* steer toward ~70% load */
	.p_load_max = 90,	/* above 90% load, jump aggressively */
	.p_smooth = 1,		/* average spans 2 samples */
};
/* Subdev hook table for the GK20A PMU. */
static const struct nvkm_subdev_func
gk20a_pmu = {
	.init = gk20a_pmu_init,
	.fini = gk20a_pmu_fini,
	.dtor = gk20a_pmu_dtor,
};
  179. int
  180. gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
  181. {
  182. static const struct nvkm_pmu_func func = {};
  183. struct gk20a_pmu *pmu;
  184. if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
  185. return -ENOMEM;
  186. pmu->base.func = &func;
  187. *ppmu = &pmu->base;
  188. nvkm_subdev_ctor(&gk20a_pmu, device, index, &pmu->base.subdev);
  189. pmu->data = &gk20a_dvfs_data;
  190. nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
  191. return 0;
  192. }