/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "msgqueue.h"
#include <engine/falcon.h>
#include <subdev/secboot.h>
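
/*
 * Host <-> PMU message queue implementation for the "0137c63d" PMU firmware
 * interface, and for its "0137bca5" variant which additionally carries the
 * WPR address needed to bootstrap several falcons at once. Commands are
 * posted on two command queues (high and low priority) and replies arrive on
 * a single message queue; each command is addressed to a firmware unit
 * (INIT, ACR, ...) identified by unit_id.
 */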

/* Queues identifiers */
enum {
	/* High Priority Command Queue for Host -> PMU communication */
	MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
	/* Low Priority Command Queue for Host -> PMU communication */
	MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
	/* Message queue for PMU -> Host communication */
	MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
	MSGQUEUE_0137C63D_NUM_QUEUES = 5,
};

struct msgqueue_0137c63d {
	struct nvkm_msgqueue base;

	struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
};
#define msgqueue_0137c63d(q) \
	container_of(q, struct msgqueue_0137c63d, base)

struct msgqueue_0137bca5 {
	struct msgqueue_0137c63d base;

	u64 wpr_addr;
};
#define msgqueue_0137bca5(q) \
	container_of(container_of(q, struct msgqueue_0137c63d, base), \
		     struct msgqueue_0137bca5, base)

static struct nvkm_msgqueue_queue *
msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
			    enum msgqueue_msg_priority priority)
{
	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
	const struct nvkm_subdev *subdev = priv->base.falcon->owner;

	switch (priority) {
	case MSGQUEUE_MSG_PRIORITY_HIGH:
		return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
	case MSGQUEUE_MSG_PRIORITY_LOW:
		return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
	default:
		nvkm_error(subdev, "invalid command queue!\n");
		return ERR_PTR(-EINVAL);
	}
}

static void
msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
{
	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
	struct nvkm_msgqueue_queue *q_queue =
		&priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];

	nvkm_msgqueue_process_msgs(&priv->base, q_queue);
}

/* Init unit */
#define MSGQUEUE_0137C63D_UNIT_INIT 0x07

enum {
	INIT_MSG_INIT = 0x0,
};
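
/*
 * init_gen_cmdline() fills the PMU boot-argument ("command line") buffer
 * handed to it through the gen_cmdline hook of the init functions below.
 * Only secure_mode is set; the remaining fields are left zeroed.
 */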

static void
init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
{
	struct {
		u32 reserved;
		u32 freq_hz;
		u32 trace_size;
		u32 trace_dma_base;
		u16 trace_dma_base1;
		u8 trace_dma_offset;
		u32 trace_dma_idx;
		bool secure_mode;
		bool raise_priv_sec;
		struct {
			u32 dma_base;
			u16 dma_base1;
			u8 dma_offset;
			u16 fb_size;
			u8 dma_idx;
		} gc6_ctx;
		u8 pad;
	} *args = buf;

	args->secure_mode = 1;
}

/* forward declaration */
static int acr_init_wpr(struct nvkm_msgqueue *queue);
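
/*
 * init_callback() - handle the INIT message sent by the PMU after boot.
 *
 * The INIT message describes where each command/message queue lives in the
 * falcon's DMEM. This records the index, offset and size of every queue,
 * picks the matching head/tail registers, and then kicks off WPR region
 * initialization on the ACR unit.
 */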

static int
init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
{
	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue);
	struct {
		struct nvkm_msgqueue_msg base;

		u8 pad;
		u16 os_debug_entry_point;

		struct {
			u16 size;
			u16 offset;
			u8 index;
			u8 pad;
		} queue_info[MSGQUEUE_0137C63D_NUM_QUEUES];

		u16 sw_managed_area_offset;
		u16 sw_managed_area_size;
	} *init = (void *)hdr;
	const struct nvkm_subdev *subdev = _queue->falcon->owner;
	int i;

	if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) {
		nvkm_error(subdev, "expected message from init unit\n");
		return -EINVAL;
	}

	if (init->base.msg_type != INIT_MSG_INIT) {
		nvkm_error(subdev, "expected PMU init msg\n");
		return -EINVAL;
	}

	for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) {
		struct nvkm_msgqueue_queue *queue = &priv->queue[i];

		mutex_init(&queue->mutex);

		queue->index = init->queue_info[i].index;
		queue->offset = init->queue_info[i].offset;
		queue->size = init->queue_info[i].size;

		if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) {
			queue->head_reg = 0x4a0 + (queue->index * 4);
			queue->tail_reg = 0x4b0 + (queue->index * 4);
		} else {
			queue->head_reg = 0x4c8;
			queue->tail_reg = 0x4cc;
		}

		nvkm_debug(subdev,
			   "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
			   i, queue->index, queue->offset, queue->size);
	}

	/* Complete initialization by initializing WPR region */
	return acr_init_wpr(&priv->base);
}

static const struct nvkm_msgqueue_init_func
msgqueue_0137c63d_init_func = {
	.gen_cmdline = init_gen_cmdline,
	.init_callback = init_callback,
};

/* ACR unit */
#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a

enum {
	ACR_CMD_INIT_WPR_REGION = 0x00,
	ACR_CMD_BOOTSTRAP_FALCON = 0x01,
	ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
};
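
/*
 * ACR command flow: INIT_WPR_REGION is sent once, from init_callback(),
 * right after the PMU has reported its queue layout; the BOOTSTRAP commands
 * are issued later, through the acr_func tables at the bottom of this file,
 * to have the PMU (re)boot other falcons.
 */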

static void
acr_init_wpr_callback(struct nvkm_msgqueue *queue,
		      struct nvkm_msgqueue_hdr *hdr)
{
	struct {
		struct nvkm_msgqueue_msg base;
		u32 error_code;
	} *msg = (void *)hdr;
	const struct nvkm_subdev *subdev = queue->falcon->owner;

	if (msg->error_code) {
		nvkm_error(subdev, "ACR WPR init failure: %d\n",
			   msg->error_code);
		return;
	}

	nvkm_debug(subdev, "ACR WPR init complete\n");
	complete_all(&queue->init_done);
}

static int
acr_init_wpr(struct nvkm_msgqueue *queue)
{
	/*
	 * region_id: region ID in WPR region
	 * wpr_offset: offset in WPR region
	 */
	struct {
		struct nvkm_msgqueue_hdr hdr;
		u8 cmd_type;
		u32 region_id;
		u32 wpr_offset;
	} cmd;
	memset(&cmd, 0, sizeof(cmd));

	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
	cmd.hdr.size = sizeof(cmd);
	cmd.cmd_type = ACR_CMD_INIT_WPR_REGION;
	cmd.region_id = 0x01;
	cmd.wpr_offset = 0x00;

	nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
			   acr_init_wpr_callback, NULL, false);

	return 0;
}

static void
acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
			 struct nvkm_msgqueue_hdr *hdr)
{
	struct acr_bootstrap_falcon_msg {
		struct nvkm_msgqueue_msg base;

		u32 falcon_id;
	} *msg = (void *)hdr;
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	u32 falcon_id = msg->falcon_id;

	if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
		nvkm_error(subdev, "in bootstrap falcon callback:\n");
		nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
		return;
	}
	nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
}

enum {
	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
};
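
/*
 * acr_boot_falcon() - ask the PMU to bootstrap a single falcon.
 *
 * Sends ACR_CMD_BOOTSTRAP_FALCON with the RESET_YES flag on the
 * high-priority queue and waits (up to one second) for the reply handled by
 * acr_boot_falcon_callback() above.
 */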

static int
acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
{
	DECLARE_COMPLETION_ONSTACK(completed);
	/*
	 * flags - Flag specifying RESET or no RESET.
	 * falcon id - Falcon id specifying falcon to bootstrap.
	 */
	struct {
		struct nvkm_msgqueue_hdr hdr;
		u8 cmd_type;
		u32 flags;
		u32 falcon_id;
	} cmd;
	memset(&cmd, 0, sizeof(cmd));

	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
	cmd.hdr.size = sizeof(cmd);
	cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
	cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
	cmd.falcon_id = falcon;
	nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
			   acr_boot_falcon_callback, &completed, true);

	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}

static void
acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv,
				   struct nvkm_msgqueue_hdr *hdr)
{
	struct acr_bootstrap_falcon_msg {
		struct nvkm_msgqueue_msg base;

		u32 falcon_mask;
	} *msg = (void *)hdr;
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	unsigned long falcon_mask = msg->falcon_mask;
	u32 falcon_id, falcon_treated = 0;

	for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
		nvkm_debug(subdev, "%s booted\n",
			   nvkm_secboot_falcon_name[falcon_id]);
		falcon_treated |= BIT(falcon_id);
	}

	if (falcon_treated != msg->falcon_mask) {
		nvkm_error(subdev, "in bootstrap falcon callback:\n");
		nvkm_error(subdev, "invalid falcon mask 0x%x\n",
			   msg->falcon_mask);
		return;
	}
}
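
/*
 * acr_boot_multiple_falcons() - ask the PMU to bootstrap a set of falcons.
 *
 * In addition to the falcon mask, the command carries the WPR address, so it
 * is only exposed by the 0137bca5 variant, whose private structure stores
 * wpr_addr.
 */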

static int
acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask)
{
	DECLARE_COMPLETION_ONSTACK(completed);
	/*
	 * flags - Flag specifying RESET or no RESET.
	 * falcon id - Falcon id specifying falcon to bootstrap.
	 */
	struct {
		struct nvkm_msgqueue_hdr hdr;
		u8 cmd_type;
		u32 flags;
		u32 falcon_mask;
		u32 use_va_mask;
		u32 wpr_lo;
		u32 wpr_hi;
	} cmd;
	struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);

	memset(&cmd, 0, sizeof(cmd));

	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
	cmd.hdr.size = sizeof(cmd);
	cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS;
	cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
	cmd.falcon_mask = falcon_mask;
	cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
	cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
	nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
			   acr_boot_multiple_falcons_callback, &completed, true);

	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}

static const struct nvkm_msgqueue_acr_func
msgqueue_0137c63d_acr_func = {
	.boot_falcon = acr_boot_falcon,
};

static const struct nvkm_msgqueue_acr_func
msgqueue_0137bca5_acr_func = {
	.boot_falcon = acr_boot_falcon,
	.boot_multiple_falcons = acr_boot_multiple_falcons,
};

static void
msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
{
	kfree(msgqueue_0137c63d(queue));
}

static const struct nvkm_msgqueue_func
msgqueue_0137c63d_func = {
	.init_func = &msgqueue_0137c63d_init_func,
	.acr_func = &msgqueue_0137c63d_acr_func,
	.cmd_queue = msgqueue_0137c63d_cmd_queue,
	.recv = msgqueue_0137c63d_process_msgs,
	.dtor = msgqueue_0137c63d_dtor,
};

int
msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
		      struct nvkm_msgqueue **queue)
{
	struct msgqueue_0137c63d *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	*queue = &ret->base;

	nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base);

	return 0;
}
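
/*
 * The 0137bca5 interface is identical except that its ACR unit also
 * implements ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS, which needs the WPR address
 * kept in struct msgqueue_0137bca5.
 */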

static const struct nvkm_msgqueue_func
msgqueue_0137bca5_func = {
	.init_func = &msgqueue_0137c63d_init_func,
	.acr_func = &msgqueue_0137bca5_acr_func,
	.cmd_queue = msgqueue_0137c63d_cmd_queue,
	.recv = msgqueue_0137c63d_process_msgs,
	.dtor = msgqueue_0137c63d_dtor,
};

int
msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
		      struct nvkm_msgqueue **queue)
{
	struct msgqueue_0137bca5 *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	*queue = &ret->base.base;

	/*
	 * FIXME this must be set to the address of a *GPU* mapping within the
	 * ACR address space!
	 */
	/* ret->wpr_addr = sb->wpr_addr; */

	nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base);

	return 0;
}
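
/*
 * Illustrative sketch (not part of this interface): a caller that has
 * obtained a queue from one of the constructors above reaches the ACR
 * commands through the function tables registered with nvkm_msgqueue_ctor().
 * The field names below are assumptions based on that constructor call; the
 * real driver goes through helpers in the core msgqueue code rather than
 * dereferencing the tables directly.
 *
 *	struct nvkm_msgqueue *queue;
 *	enum nvkm_secboot_falcon id = ...;	(falcon to bootstrap)
 *
 *	if (msgqueue_0137c63d_new(falcon, sb, &queue) == 0) {
 *		(wait for init_callback()/acr_init_wpr_callback() to
 *		 complete queue->init_done)
 *		queue->func->acr_func->boot_falcon(queue, id);
 *	}
 */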