/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "msgqueue.h"
#include <engine/falcon.h>
#include <subdev/secboot.h>

#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
#define QUEUE_ALIGNMENT 4
/* max size of the messages we can receive */
#define MSG_BUF_SIZE 128
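
/*
 * Open a message queue for reading: take its mutex and snapshot the current
 * tail pointer into queue->position so subsequent pops are consistent.
 */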
static int
msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	struct nvkm_falcon *falcon = priv->falcon;

	mutex_lock(&queue->mutex);
	queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);

	return 0;
}
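
/*
 * Close a message queue: if commit is set, write the updated read position
 * back to the tail register so the falcon can reuse the consumed space.
 */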
static void
msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
		bool commit)
{
	struct nvkm_falcon *falcon = priv->falcon;

	if (commit)
		nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);

	mutex_unlock(&queue->mutex);
}

static bool
msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	struct nvkm_falcon *falcon = priv->falcon;
	u32 head, tail;

	head = nvkm_falcon_rd32(falcon, queue->head_reg);
	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);

	return head == tail;
}
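
/*
 * Pop up to @size bytes from the queue into @data, handling wrap-around of
 * the circular buffer. Returns the number of bytes actually read.
 */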
static int
msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	      void *data, u32 size)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	u32 head, tail, available;

	head = nvkm_falcon_rd32(falcon, queue->head_reg);

	/* has the buffer looped? */
	if (head < queue->position)
		queue->position = queue->offset;

	tail = queue->position;

	available = head - tail;

	if (available == 0) {
		nvkm_warn(subdev, "no message data available\n");
		return 0;
	}

	if (size > available) {
		nvkm_warn(subdev, "message data smaller than read request\n");
		size = available;
	}

	nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
	queue->position += ALIGN(size, QUEUE_ALIGNMENT);

	return size;
}
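
/*
 * Read one complete message (header plus optional body) from the queue into
 * @hdr, which is expected to point to a buffer of at least MSG_BUF_SIZE
 * bytes.
 */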
static int
msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	       struct nvkm_msgqueue_hdr *hdr)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	int err;

	err = msg_queue_open(priv, queue);
	if (err) {
		nvkm_error(subdev, "failed to open queue %d\n", queue->index);
		return err;
	}

	if (msg_queue_empty(priv, queue)) {
		err = 0;
		goto close;
	}

	err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
	if (err >= 0 && err != HDR_SIZE)
		err = -EINVAL;
	if (err < 0) {
		nvkm_error(subdev, "failed to read message header: %d\n", err);
		goto close;
	}

	if (hdr->size > MSG_BUF_SIZE) {
		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
		err = -ENOSPC;
		goto close;
	}

	if (hdr->size > HDR_SIZE) {
		u32 read_size = hdr->size - HDR_SIZE;

		err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
		if (err >= 0 && err != read_size)
			err = -EINVAL;
		if (err < 0) {
			nvkm_error(subdev, "failed to read message: %d\n", err);
			goto close;
		}
	}

close:
	msg_queue_close(priv, queue, (err >= 0));

	return err;
}
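
/*
 * Check whether @size bytes (plus alignment) fit in the command queue. If
 * the contiguous space left at the end of the buffer is too small, *rewind
 * is set so the caller knows a REWIND command must be emitted first.
 */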
static bool
cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
		   u32 size, bool *rewind)
{
	struct nvkm_falcon *falcon = priv->falcon;
	u32 head, tail, free;

	size = ALIGN(size, QUEUE_ALIGNMENT);

	head = nvkm_falcon_rd32(falcon, queue->head_reg);
	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);

	if (head >= tail) {
		free = queue->offset + queue->size - head;
		free -= HDR_SIZE;

		if (size > free) {
			*rewind = true;
			head = queue->offset;
		}
	}

	if (head < tail)
		free = tail - head - 1;

	return size <= free;
}
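
/*
 * Copy @size bytes of command data into the falcon's DMEM at the current
 * write position and advance the position, keeping it aligned.
 */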
static int
cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	       void *data, u32 size)
{
	nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
	queue->position += ALIGN(size, QUEUE_ALIGNMENT);

	return 0;
}

/* REWIND unit is always 0x00 */
#define MSGQUEUE_UNIT_REWIND 0x00
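
/*
 * Emit a REWIND command at the current write position and reset the write
 * position to the start of the queue, so the next command is written there.
 */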
static void
cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	struct nvkm_msgqueue_hdr cmd;
	int err;

	cmd.unit_id = MSGQUEUE_UNIT_REWIND;
	cmd.size = sizeof(cmd);
	err = cmd_queue_push(priv, queue, &cmd, cmd.size);
	if (err)
		nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
	else
		nvkm_debug(subdev, "queue %d rewound\n", queue->index);

	queue->position = queue->offset;
}
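
/*
 * Open a command queue for writing @size bytes: take its mutex, check that
 * enough room is available (returning -EAGAIN if the queue is full), set the
 * write position from the head register and emit a rewind if required.
 */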
static int
cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	       u32 size)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	bool rewind = false;

	mutex_lock(&queue->mutex);

	if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
		nvkm_error(subdev, "queue full\n");
		mutex_unlock(&queue->mutex);
		return -EAGAIN;
	}

	queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);

	if (rewind)
		cmd_queue_rewind(priv, queue);

	return 0;
}

static void
cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
		bool commit)
{
	struct nvkm_falcon *falcon = priv->falcon;

	if (commit)
		nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);

	mutex_unlock(&queue->mutex);
}
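
/*
 * Write a complete command into the queue, retrying while the queue is full
 * and only committing the new head pointer if the push succeeded.
 */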
static int
cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
	  struct nvkm_msgqueue_queue *queue)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	static unsigned long timeout = ~0;
	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
	int ret = -EAGAIN;
	bool commit = true;

	while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
		ret = cmd_queue_open(priv, queue, cmd->size);
	if (ret) {
		nvkm_error(subdev, "pmu_queue_open_write failed\n");
		return ret;
	}

	ret = cmd_queue_push(priv, queue, cmd, cmd->size);
	if (ret) {
		nvkm_error(subdev, "pmu_queue_push failed\n");
		commit = false;
	}

	cmd_queue_close(priv, queue, commit);

	return ret;
}
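
/*
 * Reserve a free sequence slot, used to match an outgoing command with the
 * answer message the falcon will send back for it.
 */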
static struct nvkm_msgqueue_seq *
msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	struct nvkm_msgqueue_seq *seq;
	u32 index;

	mutex_lock(&priv->seq_lock);

	index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);

	if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
		nvkm_error(subdev, "no free sequence available\n");
		mutex_unlock(&priv->seq_lock);
		return ERR_PTR(-EAGAIN);
	}

	set_bit(index, priv->seq_tbl);
	mutex_unlock(&priv->seq_lock);

	seq = &priv->seq[index];
	seq->state = SEQ_STATE_PENDING;

	return seq;
}
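
/*
 * Return a sequence slot to the free pool once its answer has been handled
 * (or the command could not be submitted).
 */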
static void
msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
{
	/* no need to acquire seq_lock since clear_bit is atomic */
	seq->state = SEQ_STATE_FREE;
	seq->callback = NULL;
	seq->completion = NULL;
	clear_bit(seq->id, priv->seq_tbl);
}

/* specifies that we want to know the command status in the answer message */
#define CMD_FLAGS_STATUS BIT(0)
/* specifies that we want an interrupt when the answer message is queued */
#define CMD_FLAGS_INTR BIT(1)
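
/*
 * Submit a command to the firmware: optionally wait for the init message,
 * pick the command queue for @prio, attach a sequence (with callback and
 * completion) and write the command, releasing the sequence on failure.
 */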
int
nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
		   struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
		   struct completion *completion, bool wait_init)
{
	struct nvkm_msgqueue_seq *seq;
	struct nvkm_msgqueue_queue *queue;
	int ret;

	if (wait_init && !wait_for_completion_timeout(&priv->init_done,
						      msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	queue = priv->func->cmd_queue(priv, prio);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	seq = msgqueue_seq_acquire(priv);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	cmd->seq_id = seq->id;
	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;

	seq->callback = cb;
	seq->state = SEQ_STATE_USED;
	seq->completion = completion;

	ret = cmd_write(priv, cmd, queue);
	if (ret) {
		seq->state = SEQ_STATE_PENDING;
		msgqueue_seq_release(priv, seq);
	}

	return ret;
}
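
/*
 * Dispatch an answer message to the sequence it belongs to: run the
 * registered callback, signal the completion, and free the sequence.
 */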
static int
msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	struct nvkm_msgqueue_seq *seq;

	seq = &priv->seq[hdr->seq_id];
	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
		nvkm_error(subdev, "msg for unknown sequence %d\n", seq->id);
		return -EINVAL;
	}

	if (seq->state == SEQ_STATE_USED) {
		if (seq->callback)
			seq->callback(priv, hdr);
	}

	if (seq->completion)
		complete(seq->completion);

	msgqueue_seq_release(priv, seq);

	return 0;
}
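
/*
 * Read and process the INIT message the falcon sends right after boot. The
 * queues are not configured yet at this point, so the message is read
 * directly from DMEM using a per-falcon tail register.
 */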
static int
msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
			 struct nvkm_msgqueue_hdr *hdr)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 tail;
	u32 tail_reg;
	int ret;

	/*
	 * Of course the message queue registers vary depending on the falcon
	 * used...
	 */
	switch (falcon->owner->index) {
	case NVKM_SUBDEV_PMU:
		tail_reg = 0x4cc;
		break;
	case NVKM_ENGINE_SEC2:
		tail_reg = 0xa34;
		break;
	default:
		nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
			   nvkm_subdev_name[falcon->owner->index]);
		return -EINVAL;
	}

	/*
	 * Read the message - queues are not initialized yet so we cannot rely
	 * on msg_queue_read()
	 */
	tail = nvkm_falcon_rd32(falcon, tail_reg);
	nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);

	if (hdr->size > MSG_BUF_SIZE) {
		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
		return -ENOSPC;
	}

	nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
			      (hdr + 1));

	tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
	nvkm_falcon_wr32(falcon, tail_reg, tail);

	ret = priv->func->init_func->init_callback(priv, hdr);
	if (ret)
		return ret;

	return 0;
}
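
/*
 * Process pending messages on @queue. The very first message received after
 * boot must be the init message; everything after that is dispatched through
 * msgqueue_msg_handle().
 */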
void
nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
			   struct nvkm_msgqueue_queue *queue)
{
	/*
	 * We are invoked from a worker thread, so normally we have plenty of
	 * stack space to work with.
	 */
	u8 msg_buffer[MSG_BUF_SIZE];
	struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
	int ret;

	/* the first message we receive must be the init message */
	if (!priv->init_msg_received) {
		ret = msgqueue_handle_init_msg(priv, hdr);
		if (!ret)
			priv->init_msg_received = true;
	} else {
		while (msg_queue_read(priv, queue, hdr) > 0)
			msgqueue_msg_handle(priv, hdr);
	}
}

void
nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
{
	if (!queue || !queue->func || !queue->func->init_func)
		return;

	queue->func->init_func->gen_cmdline(queue, buf);
}
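
/*
 * Ask the ACR firmware to boot the falcons in @falcon_mask, either with a
 * single multi-falcon command when supported or one command per falcon.
 */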
int
nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
			       unsigned long falcon_mask)
{
	unsigned long falcon;

	if (!queue || !queue->func->acr_func)
		return -ENODEV;

	/* Does the firmware support booting multiple falcons? */
	if (queue->func->acr_func->boot_multiple_falcons)
		return queue->func->acr_func->boot_multiple_falcons(queue,
								    falcon_mask);

	/* Else boot all requested falcons individually */
	if (!queue->func->acr_func->boot_falcon)
		return -ENODEV;

	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
		int ret = queue->func->acr_func->boot_falcon(queue, falcon);

		if (ret)
			return ret;
	}

	return 0;
}
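
/*
 * Instantiate the msgqueue implementation matching the given firmware
 * version.
 */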
int
nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
		  const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
{
	const struct nvkm_subdev *subdev = falcon->owner;
	int ret = -EINVAL;

	switch (version) {
	case 0x0137c63d:
		ret = msgqueue_0137c63d_new(falcon, sb, queue);
		break;
	case 0x0137bca5:
		ret = msgqueue_0137bca5_new(falcon, sb, queue);
		break;
	case 0x0148cdec:
		ret = msgqueue_0148cdec_new(falcon, sb, queue);
		break;
	default:
		nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
			   version);
		break;
	}

	if (ret == 0) {
		nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
		(*queue)->fw_version = version;
	}

	return ret;
}

void
nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
{
	if (*queue) {
		(*queue)->func->dtor(*queue);
		*queue = NULL;
	}
}

void
nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
{
	if (!queue->func || !queue->func->recv) {
		const struct nvkm_subdev *subdev = queue->falcon->owner;

		nvkm_warn(subdev, "missing msgqueue recv function\n");
		return;
	}

	queue->func->recv(queue);
}

int
nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
{
	/* firmware not set yet... */
	if (!queue)
		return 0;

	queue->init_msg_received = false;
	reinit_completion(&queue->init_done);

	return 0;
}

void
nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
		   struct nvkm_falcon *falcon,
		   struct nvkm_msgqueue *queue)
{
	int i;

	queue->func = func;
	queue->falcon = falcon;
	mutex_init(&queue->seq_lock);
	for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
		queue->seq[i].id = i;

	init_completion(&queue->init_done);
}