a6xx_hfi.c 9.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
  3. #include <linux/completion.h>
  4. #include <linux/circ_buf.h>
  5. #include <linux/list.h>
  6. #include "a6xx_gmu.h"
  7. #include "a6xx_gmu.xml.h"
/* Expand to a designated initializer mapping an HFI message ID to its name */
#define HFI_MSG_ID(val) [val] = #val

/*
 * Human readable names for the host-to-firmware (H2F) HFI messages,
 * indexed by message ID. Used only for diagnostic prints below.
 */
static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
};
/*
 * Read the next packet out of a HFI ring buffer shared with the GMU.
 *
 * Returns the packet size in dwords on success, or 0 if the queue is
 * empty - in which case rx_request is set so the GMU will raise an
 * interrupt when it next writes to the queue.
 *
 * NOTE(review): read_index/write_index live in memory shared with the
 * GMU and are accessed here without explicit barriers - presumably the
 * surrounding register poll/ack orders things; confirm against the GMU
 * firmware interface.
 */
static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
		u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	/* Empty queue: request a notification and bail */
	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	/* First dword at read_index is the packet header (carries the size) */
	hdr = queue->data[index];

	/*
	 * If we are to assume that the GMU firmware is in fact a rational actor
	 * and is programmed to not send us a larger response than we expect
	 * then we can also assume that if the header size is unexpectedly large
	 * that it is due to memory corruption and/or hardware failure. In this
	 * case the only reasonable course of action is to BUG() to help harden
	 * the failure.
	 */
	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	/* Copy the whole packet (header included) out of the ring */
	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	/* Publish the new read index back to the shared header */
	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}
  42. static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
  43. struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
  44. {
  45. struct a6xx_hfi_queue_header *header = queue->header;
  46. u32 i, space, index = header->write_index;
  47. spin_lock(&queue->lock);
  48. space = CIRC_SPACE(header->write_index, header->read_index,
  49. header->size);
  50. if (space < dwords) {
  51. header->dropped++;
  52. spin_unlock(&queue->lock);
  53. return -ENOSPC;
  54. }
  55. for (i = 0; i < dwords; i++) {
  56. queue->data[index] = data[i];
  57. index = (index + 1) % header->size;
  58. }
  59. header->write_index = index;
  60. spin_unlock(&queue->lock);
  61. gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
  62. return 0;
  63. }
/*
 * Wait for the GMU to acknowledge the HFI message 'id' with sequence
 * number 'seqnum'. On success up to payload_size bytes of the response
 * payload (capped at the size of the response payload field) are copied
 * into 'payload' when both are non-zero.
 *
 * Returns 0 on success, -ETIMEDOUT if the GMU never raised the MSGQ
 * interrupt, -ENOENT if the response queue turned out empty, or -EINVAL
 * if the GMU reported an error code for the message.
 */
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
		u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	u32 val;
	int ret;

	/* Wait for a response */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

	if (ret) {
		dev_err(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		return -ETIMEDOUT;
	}

	/* Clear the interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

	/* Drain the response queue until we find the ack for our message */
	for (;;) {
		struct a6xx_hfi_msg_response resp;

		/* Get the next packet */
		ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/* If the queue is empty our response never made it */
		if (!ret) {
			dev_err(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");
			return -ENOENT;
		}

		/*
		 * Asynchronous firmware error packets may be interleaved
		 * with acks; log them and keep draining.
		 */
		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			struct a6xx_hfi_msg_error *error =
				(struct a6xx_hfi_msg_error *) &resp;

			dev_err(gmu->dev, "GMU firmware error %d\n",
				error->code);
			continue;
		}

		/*
		 * Skip acks that don't match our sequence number.
		 * NOTE(review): the text says "message id" but the value
		 * printed is the seqnum of the stray ack.
		 */
		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
			dev_err(gmu->dev,
				"Unexpected message id %d on the response queue\n",
				HFI_HEADER_SEQNUM(resp.ret_header));
			continue;
		}

		/* The GMU processed our message but reported a failure */
		if (resp.error) {
			dev_err(gmu->dev,
				"Message %s id %d returned error %d\n",
				a6xx_hfi_msg_id[id], seqnum, resp.error);
			return -EINVAL;
		}

		/* All is well, copy over the buffer */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
				min_t(u32, payload_size, sizeof(resp.payload)));

		return 0;
	}
}
  119. static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
  120. void *data, u32 size, u32 *payload, u32 payload_size)
  121. {
  122. struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
  123. int ret, dwords = size >> 2;
  124. u32 seqnum;
  125. seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
  126. /* First dword of the message is the message header - fill it in */
  127. *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
  128. (dwords << 8) | id;
  129. ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
  130. if (ret) {
  131. dev_err(gmu->dev, "Unable to send message %s id %d\n",
  132. a6xx_hfi_msg_id[id], seqnum);
  133. return ret;
  134. }
  135. return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
  136. }
  137. static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
  138. {
  139. struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
  140. msg.dbg_buffer_addr = (u32) gmu->debug->iova;
  141. msg.dbg_buffer_size = (u32) gmu->debug->size;
  142. msg.boot_state = boot_state;
  143. return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
  144. NULL, 0);
  145. }
  146. static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
  147. {
  148. struct a6xx_hfi_msg_fw_version msg = { 0 };
  149. /* Currently supporting version 1.1 */
  150. msg.supported_version = (1 << 28) | (1 << 16);
  151. return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
  152. version, sizeof(*version));
  153. }
  154. static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
  155. {
  156. struct a6xx_hfi_msg_perf_table msg = { 0 };
  157. int i;
  158. msg.num_gpu_levels = gmu->nr_gpu_freqs;
  159. msg.num_gmu_levels = gmu->nr_gmu_freqs;
  160. for (i = 0; i < gmu->nr_gpu_freqs; i++) {
  161. msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
  162. msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
  163. }
  164. for (i = 0; i < gmu->nr_gmu_freqs; i++) {
  165. msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
  166. msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
  167. }
  168. return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
  169. NULL, 0);
  170. }
/*
 * Send the bus bandwidth table to the GMU.
 *
 * NOTE(review): all addresses and vote values below are fixed,
 * sdm845-specific magic - presumably bus command DB offsets and TCS
 * vote encodings; confirm against the platform documentation before
 * reusing for another SoC.
 */
static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table msg = { 0 };

	/*
	 * The sdm845 GMU doesn't do bus frequency scaling on its own but it
	 * does need at least one entry in the list because it might be accessed
	 * when the GMU is shutting down. Send a single "off" entry.
	 */
	msg.bw_level_num = 1;

	msg.ddr_cmds_num = 3;
	msg.ddr_wait_bitmask = 0x07;

	msg.ddr_cmds_addrs[0] = 0x50000;
	msg.ddr_cmds_addrs[1] = 0x5005c;
	msg.ddr_cmds_addrs[2] = 0x5000c;

	/* Single DDR level (the "off" entry) */
	msg.ddr_cmds_data[0][0] = 0x40000000;
	msg.ddr_cmds_data[0][1] = 0x40000000;
	msg.ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes. This is used but the values for the
	 * sdm845 GMU are known and fixed so we can hard code them.
	 */
	msg.cnoc_cmds_num = 3;
	msg.cnoc_wait_bitmask = 0x05;

	msg.cnoc_cmds_addrs[0] = 0x50034;
	msg.cnoc_cmds_addrs[1] = 0x5007c;
	msg.cnoc_cmds_addrs[2] = 0x5004c;

	/* CNOC level 0 ("off") and level 1 ("on") vote data */
	msg.cnoc_cmds_data[0][0] = 0x40000000;
	msg.cnoc_cmds_data[0][1] = 0x00000000;
	msg.cnoc_cmds_data[0][2] = 0x40000000;

	msg.cnoc_cmds_data[1][0] = 0x60000001;
	msg.cnoc_cmds_data[1][1] = 0x20000001;
	msg.cnoc_cmds_data[1][2] = 0x60000001;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
		NULL, 0);
}
  206. static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
  207. {
  208. struct a6xx_hfi_msg_test msg = { 0 };
  209. return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
  210. NULL, 0);
  211. }
/*
 * Run the boot-time HFI handshake with the GMU: init, firmware version
 * exchange, performance table, bandwidth table, then the final test
 * message. Returns 0 on success or the first error in the sequence.
 */
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to get exchange version numbers per the sequence but at this
	 * point the kernel driver doesn't need to know the exact version of
	 * the GMU firmware
	 */

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until next
	 * boot
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}
  239. void a6xx_hfi_stop(struct a6xx_gmu *gmu)
  240. {
  241. int i;
  242. for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
  243. struct a6xx_hfi_queue *queue = &gmu->queues[i];
  244. if (!queue->header)
  245. continue;
  246. if (queue->header->read_index != queue->header->write_index)
  247. dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
  248. queue->header->read_index = 0;
  249. queue->header->write_index = 0;
  250. }
  251. }
/*
 * Initialize the host-side bookkeeping for one HFI queue and write its
 * header into the memory shared with the GMU.
 *
 * NOTE(review): the magic values below (type high byte 10, status 1,
 * watermarks) are what the GMU firmware expects for this HFI revision -
 * confirm against the firmware interface before changing.
 */
static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
		struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
		u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	/* Set up the shared memory header */
	header->iova = iova;		/* GMU-visible address of the queue data */
	header->type = 10 << 8 | id;
	header->status = 1;
	header->size = SZ_4K >> 2;	/* queue size in dwords (4K bytes) */
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	header->rx_request = 1;		/* interrupt us when data arrives */
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}
  274. void a6xx_hfi_init(struct a6xx_gmu *gmu)
  275. {
  276. struct a6xx_gmu_bo *hfi = gmu->hfi;
  277. struct a6xx_hfi_queue_table_header *table = hfi->virt;
  278. struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
  279. u64 offset;
  280. int table_size;
  281. /*
  282. * The table size is the size of the table header plus all of the queue
  283. * headers
  284. */
  285. table_size = sizeof(*table);
  286. table_size += (ARRAY_SIZE(gmu->queues) *
  287. sizeof(struct a6xx_hfi_queue_header));
  288. table->version = 0;
  289. table->size = table_size;
  290. /* First queue header is located immediately after the table header */
  291. table->qhdr0_offset = sizeof(*table) >> 2;
  292. table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
  293. table->num_queues = ARRAY_SIZE(gmu->queues);
  294. table->active_queues = ARRAY_SIZE(gmu->queues);
  295. /* Command queue */
  296. offset = SZ_4K;
  297. a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
  298. hfi->iova + offset, 0);
  299. /* GMU response queue */
  300. offset += SZ_4K;
  301. a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
  302. hfi->iova + offset, 4);
  303. }