intel_guc_ct.c

/*
 * Copyright © 2016-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)	do { } while (0)
#endif

struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

struct ct_incoming_request {
	struct list_head link;
	u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	/* we're using static channel owners */
	ct->host_channel.owner = CTB_OWNER_HOST;

	spin_lock_init(&ct->lock);
	INIT_LIST_HEAD(&ct->pending_requests);
	INIT_LIST_HEAD(&ct->incoming_requests);
	INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
}

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size, u32 owner)
{
	CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
			desc, cmds_addr, size, owner);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = owner;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 owner,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		owner,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
			  guc_ct_buffer_type_to_str(type), owner, err);
	return err;
}

static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
{
	return ctch->vma != NULL;
}

static int ctch_init(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	struct i915_vma *vma;
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ctch->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */
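	/*
	 * Illustrative note (editor's sketch, not from the original source):
	 * assuming PAGE_SIZE == 4096, the layout above places the SEND
	 * descriptor at offset 0, the RECV descriptor at 1024, the SEND cmds
	 * buffer at 2048 and the RECV cmds buffer at 3072, matching the
	 * blob + PAGE_SIZE/4 * i (+ PAGE_SIZE/2) arithmetic used below.
	 */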
	/* allocate vma */
	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_out;
	}
	ctch->vma = vma;

	/* map first page */
	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(blob)) {
		err = PTR_ERR(blob);
		goto err_vma;
	}
	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
			intel_guc_ggtt_offset(guc, ctch->vma));

	/* store pointers to desc and cmds */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}

	return 0;

err_vma:
	i915_vma_unpin_and_release(&ctch->vma);
err_out:
	CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
			ctch->owner, err);
	return err;
}

static void ctch_fini(struct intel_guc *guc,
		      struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch->vma);

	i915_gem_object_unpin_map(ctch->vma->obj);
	i915_vma_unpin_and_release(&ctch->vma);
}

static int ctch_open(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	u32 base;
	int err;
	int i;

	CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
			ctch->owner, yesno(ctch_is_open(ctch)));

	if (!ctch->vma) {
		err = ctch_init(guc, ctch);
		if (unlikely(err))
			goto err_out;
		GEM_BUG_ON(!ctch->vma);
	}
	/* vma should be already allocated and mapped */
	base = intel_guc_ggtt_offset(guc, ctch->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4,
					ctch->owner);
	}

	/* register buffers, starting with RECV buffer
	 * descriptors are in the first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_fini;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	return 0;

err_deregister:
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_fini:
	ctch_fini(guc, ctch);
err_out:
	DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
	return err;
}

static void ctch_close(struct intel_guc *guc,
		       struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch_is_open(ctch));

	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_SEND);
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
	ctch_fini(guc, ctch);
}

static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
{
	/* For now it's trivial */
	return ++ctch->next_fence;
}

/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |        MESSAGE PAYLOAD                |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |    request specific data    |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
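/*
 * Illustrative sketch (editor's note, not from the original source): for a
 * request built from action[] = { CODE, PARAM0, PARAM1 } (len == 3),
 * ctb_write() below emits four consecutive dwords into the SEND buffer:
 *
 *      cmds[tail + 0] = header;	// carries len == 3 and action CODE
 *      cmds[tail + 1] = fence;
 *      cmds[tail + 2] = PARAM0;	// action[1]
 *      cmds[tail + 3] = PARAM1;	// action[2]
 *
 * i.e. the len field of the header counts the fence plus the payload dwords,
 * while the header itself travels separately as msg[0].
 */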
static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence,
		     bool want_response)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is space, including an extra dw for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
			4, &header, 4, &fence,
			4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);
	return 0;
}
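/*
 * Editor's illustration (not from the original source): with the PAGE/4
 * layout above and assuming PAGE_SIZE == 4096, desc->size is 1024 bytes,
 * i.e. size == 256 dwords. An empty buffer (head == tail, used == 0) easily
 * accepts a len == 3 action (4 dwords: header + fence + 2 payload dwords),
 * while the used + len + 1 >= size check guarantees the buffer is never
 * filled completely, since tail == head must keep meaning "empty".
 */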
/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc: buffer descriptor
 * @fence: response fence
 * @status: placeholder for status
 *
 * GuC will update the CT buffer descriptor with the new fence and status
 * after processing the command identified by the fence. Wait for the
 * specified fence and then read the status of the command from the
 * descriptor.
 *
 * Return:
 * * 0 response received (status is valid)
 * * -ETIMEDOUT no response within hardcoded timeout
 * * -EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
				    u32 fence,
				    u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to reset
			 * the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}
/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req: pointer to pending request
 * @status: placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request once
 * a response message with the given fence is received. Wait here and
 * check for a valid response status value.
 *
 * Return:
 * * 0 response received (status is valid)
 * * -ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}
static int ctch_send(struct intel_guc_ct *ct,
		     struct intel_guc_ct_channel *ctch,
		     const u32 *action,
		     u32 len,
		     u32 *response_buf,
		     u32 response_buf_size,
		     u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ctch_is_open(ctch));
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	fence = ctch_get_next_fence(ctch);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock_irqsave(&ct->lock, flags);
	list_add_tail(&request.link, &ct->pending_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	err = ctb_write(ctb, action, len, fence, !!response_buf);
	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	if (response_buf)
		err = wait_for_ct_request_update(&request, status);
	else
		err = wait_for_ctb_desc_update(desc, fence, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->lock, flags);

	return err;
}
/*
 * Command Transport (CT) buffer based GuC send function.
 */
static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 *response_buf, u32 response_buf_size)
{
	struct intel_guc_ct *ct = &guc->ct;
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	u32 status = ~0; /* undefined */
	int ret;

	mutex_lock(&guc->send_mutex);

	ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
			&status);
	if (unlikely(ret < 0)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
				action[0], ret, ret);
	}

	mutex_unlock(&guc->send_mutex);
	return ret;
}
static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT;
}

static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 *cmds = ctb->cmds;
	s32 available;			/* in dwords */
	unsigned int len;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);
	GEM_BUG_ON(head >= size);

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0))
		return -ENODATA;

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	data[0] = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = ct_header_get_len(data[0]) + 1;
	if (unlikely(len > (u32)available)) {
		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
			  4, data,
			  4 * (head + available - 1 > size ?
			       size - head : available - 1), &cmds[head],
			  4 * (head + available - 1 > size ?
			       available - 1 - size + head : 0), &cmds[0]);
		return -EPROTO;
	}

	for (i = 1; i < len; i++) {
		data[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

	desc->head = head * 4;
	return 0;
}
/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |        MESSAGE PAYLOAD                          |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
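/*
 * Editor's illustration (not from the original source): a successful
 * response to the request with fence 42 carrying one extra data dword
 * would arrive as
 *
 *      msg[0] = header	// len == 3, action INTEL_GUC_ACTION_DEFAULT
 *      msg[1] = 42	// FENCE, matched against ct->pending_requests below
 *      msg[2] = status	// must satisfy INTEL_GUC_MSG_IS_RESPONSE()
 *      msg[3] = data	// copied into req->response_buf
 */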
static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	bool found = false;

	GEM_BUG_ON(!ct_header_is_response(header));
	GEM_BUG_ON(!in_irq());

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	fence = msg[1];
	status = msg[2];
	datalen = len - 2;

	/* Format of the status follows RESPONSE message */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

	spin_lock(&ct->lock);
	list_for_each_entry(req, &ct->pending_requests, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
					req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			DRM_ERROR("CT: response %u too long %*ph\n",
				  req->fence, 4 * msglen, msg);
			datalen = 0;
		}
		if (datalen)
			memcpy(req->response_buf, msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock(&ct->lock);

	if (!found)
		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
	return 0;
}
static void ct_process_request(struct intel_guc_ct *ct,
			       u32 action, u32 len, const u32 *payload)
{
	struct intel_guc *guc = ct_to_guc(ct);

	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		if (unlikely(len < 1))
			goto fail_unexpected;
		intel_guc_to_host_process_recv_msg(guc, *payload);
		break;

	default:
fail_unexpected:
		DRM_ERROR("CT: unexpected request %x %*ph\n",
			  action, 4 * len, payload);
		break;
	}
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_request *request;
	u32 header;
	u32 *payload;
	bool done;

	spin_lock_irqsave(&ct->lock, flags);
	request = list_first_entry_or_null(&ct->incoming_requests,
					   struct ct_incoming_request, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->incoming_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	if (!request)
		return true;

	header = request->msg[0];
	payload = &request->msg[1];
	ct_process_request(ct,
			   ct_header_get_action(header),
			   ct_header_get_len(header),
			   payload);

	kfree(request);
	return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->worker);
}
/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |        MESSAGE PAYLOAD                          |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
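/*
 * Editor's illustration (not from the original source): unlike responses,
 * which are matched to a sleeping sender directly in interrupt context, each
 * GuC to Host request below is copied into a freshly allocated
 * ct_incoming_request (header plus len payload dwords, i.e. 4 * (len + 1)
 * bytes) and handed to ct_incoming_request_worker_func() via
 * system_unbound_wq, so the actual processing happens later in process
 * context.
 */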
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	struct ct_incoming_request *request;
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(header));

	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
	if (unlikely(!request)) {
		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
		return 0; /* XXX: -ENOMEM ? */
	}
	memcpy(request->msg, msg, 4 * msglen);

	spin_lock_irqsave(&ct->lock, flags);
	list_add_tail(&request->link, &ct->incoming_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	queue_work(system_unbound_wq, &ct->worker);
	return 0;
}

static void ct_process_host_channel(struct intel_guc_ct *ct)
{
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
	int err = 0;

	if (!ctch_is_open(ctch))
		return;

	do {
		err = ctb_read(ctb, msg);
		if (err)
			break;

		if (ct_header_is_response(msg[0]))
			err = ct_handle_response(ct, msg);
		else
			err = ct_handle_request(ct, msg);
	} while (!err);

	if (GEM_WARN_ON(err == -EPROTO)) {
		DRM_ERROR("CT: corrupted message detected!\n");
		ctb->desc->is_in_error = 1;
	}
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
	struct intel_guc_ct *ct = &guc->ct;

	ct_process_host_channel(ct);
}
/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct drm_i915_private *i915 = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	int err;

	GEM_BUG_ON(!HAS_GUC_CT(i915));

	err = ctch_open(guc, ctch);
	if (unlikely(err))
		return err;

	/* Switch into cmd transport buffer based send() */
	guc->send = intel_guc_send_ct;
	guc->handler = intel_guc_to_host_event_handler_ct;
	DRM_INFO("CT: %s\n", enableddisabled(true));
	return 0;
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct drm_i915_private *i915 = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;

	GEM_BUG_ON(!HAS_GUC_CT(i915));

	if (!ctch_is_open(ctch))
		return;

	ctch_close(guc, ctch);

	/* Disable send */
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	DRM_INFO("CT: %s\n", enableddisabled(false));
}