intel_guc_ct.c

/*
 * Copyright © 2016-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"
enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	/* we're using static channel owners */
	ct->host_channel.owner = CTB_OWNER_HOST;
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size, u32 owner)
{
	DRM_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
			 desc, cmds_addr, size, owner);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = owner;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	DRM_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			 desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}
static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 owner,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		owner,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
			  guc_ct_buffer_type_to_str(type), owner, err);
	return err;
}
static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
{
	return ctch->vma != NULL;
}

static int ctch_init(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	struct i915_vma *vma;
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ctch->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	/* allocate vma */
	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_out;
	}
	ctch->vma = vma;

	/* map first page */
	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(blob)) {
		err = PTR_ERR(blob);
		goto err_vma;
	}
	DRM_DEBUG_DRIVER("CT: vma base=%#x\n", guc_ggtt_offset(ctch->vma));

	/* store pointers to desc and cmds */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}

	return 0;

err_vma:
	i915_vma_unpin_and_release(&ctch->vma);
err_out:
	DRM_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
			 ctch->owner, err);
	return err;
}
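
/*
 * For illustration only: assuming the common PAGE_SIZE of 4096 bytes, the
 * pointer arithmetic in the loop above lays the single page out as
 *
 *	ctbs[CTB_SEND].desc = blob +    0;	(descriptors in first half)
 *	ctbs[CTB_RECV].desc = blob + 1024;
 *	ctbs[CTB_SEND].cmds = blob + 2048;	(command buffers in second half)
 *	ctbs[CTB_RECV].cmds = blob + 3072;
 */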
static void ctch_fini(struct intel_guc *guc,
		      struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch->vma);

	i915_gem_object_unpin_map(ctch->vma->obj);
	i915_vma_unpin_and_release(&ctch->vma);
}

static int ctch_open(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	u32 base;
	int err;
	int i;

	DRM_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
			 ctch->owner, yesno(ctch_is_open(ctch)));

	if (!ctch->vma) {
		err = ctch_init(guc, ctch);
		if (unlikely(err))
			goto err_out;
	}

	/* vma should be already allocated and mapped */
	base = guc_ggtt_offset(ctch->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4,
					ctch->owner);
	}

	/* register buffers, starting with RECV buffer
	 * descriptors are in first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_fini;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	return 0;

err_deregister:
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_fini:
	ctch_fini(guc, ctch);
err_out:
	DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
	return err;
}

static void ctch_close(struct intel_guc *guc,
		       struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch_is_open(ctch));

	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_SEND);
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
	ctch_fini(guc, ctch);
}
static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
{
	/* For now it's trivial */
	return ++ctch->next_fence;
}

static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is enough space, including the extra dword needed
	 * for the fence
	 */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/* Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);
	return 0;
}
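
/*
 * A worked example of the space check above, with illustrative numbers that
 * are not from the source: say size = 256 dwords, head = 250 and tail = 10.
 * The tail has wrapped, so used = (256 - 250) + 10 = 16 dwords. A message
 * occupies len + 1 dwords in total (action[0] is folded into the header
 * dword and the fence adds one extra dword), so the write succeeds only
 * while used + len + 1 < size; otherwise it is refused with -ENOSPC.
 */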
/* Wait for the response from the GuC.
 * @desc: buffer descriptor
 * @fence: response fence
 * @status: placeholder for status
 * return:	0 response received (status is valid)
 *		-ETIMEDOUT no response within hardcoded timeout
 *		-EPROTO no response, ct buffer was in error
 */
static int wait_for_response(struct guc_ct_buffer_desc *desc,
			     u32 fence,
			     u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to
			 * reset the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}
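
/*
 * Note: ctch_send() below performs no locking of its own; access to the
 * SEND buffer is expected to be serialised by the caller, which
 * intel_guc_send_ct() does by holding guc->send_mutex across the call.
 */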
static int ctch_send(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch,
		     const u32 *action,
		     u32 len,
		     u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 fence;
	int err;

	GEM_BUG_ON(!ctch_is_open(ctch));
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);

	fence = ctch_get_next_fence(ctch);
	err = ctb_write(ctb, action, len, fence);
	if (unlikely(err))
		return err;

	intel_guc_notify(guc);

	err = wait_for_response(desc, fence, status);
	if (unlikely(err))
		return err;
	if (*status != INTEL_GUC_STATUS_SUCCESS)
		return -EIO;
	return 0;
}
/*
 * Command Transport (CT) buffer based GuC send function.
 */
static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len)
{
	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
	u32 status = ~0; /* undefined */
	int err;

	mutex_lock(&guc->send_mutex);

	err = ctch_send(guc, ctch, action, len, &status);
	if (unlikely(err)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], err, status);
	}

	mutex_unlock(&guc->send_mutex);
	return err;
}
/**
 * Enable buffer based command transport.
 * Shall only be called for platforms with HAS_GUC_CT.
 * @guc: the guc
 * return:	0 on success
 *		non-zero on failure
 */
int intel_guc_enable_ct(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
	int err;

	GEM_BUG_ON(!HAS_GUC_CT(dev_priv));

	err = ctch_open(guc, ctch);
	if (unlikely(err))
		return err;

	/* Switch into cmd transport buffer based send() */
	guc->send = intel_guc_send_ct;
	DRM_INFO("CT: %s\n", enableddisabled(true));
	return 0;
}
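
/*
 * Hypothetical caller flow, for illustration only (the real call sites live
 * elsewhere in the driver):
 *
 *	if (HAS_GUC_CT(dev_priv)) {
 *		err = intel_guc_enable_ct(guc);
 *		if (err)
 *			return err;
 *	}
 *	... from here on, guc->send() submissions go through the CT buffer ...
 *	intel_guc_disable_ct(guc);	(falls back to intel_guc_send_nop)
 */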
/**
 * Disable buffer based command transport.
 * Shall only be called for platforms with HAS_GUC_CT.
 * @guc: the guc
 */
void intel_guc_disable_ct(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;

	GEM_BUG_ON(!HAS_GUC_CT(dev_priv));

	if (!ctch_is_open(ctch))
		return;

	ctch_close(guc, ctch);

	/* Disable send */
	guc->send = intel_guc_send_nop;
	DRM_INFO("CT: %s\n", enableddisabled(false));
}