/* intel_guc_ct.c */
  1. /*
  2. * Copyright © 2016-2017 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. */
  23. #include "i915_drv.h"
  24. #include "intel_guc_ct.h"
/* Indices into a channel's ctbs[] array: one buffer per transfer direction. */
enum { CTB_SEND = 0, CTB_RECV = 1 };

/* Owner id stored in each buffer descriptor; only the host owns CTBs here. */
enum { CTB_OWNER_HOST = 0 };
/**
 * intel_guc_ct_init_early - record static CT channel data.
 * @ct: CT state embedded in struct intel_guc
 *
 * No allocation happens here; only the fixed channel owner is assigned.
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	/* we're using static channel owners */
	ct->host_channel.owner = CTB_OWNER_HOST;
}
  32. static inline const char *guc_ct_buffer_type_to_str(u32 type)
  33. {
  34. switch (type) {
  35. case INTEL_GUC_CT_BUFFER_TYPE_SEND:
  36. return "SEND";
  37. case INTEL_GUC_CT_BUFFER_TYPE_RECV:
  38. return "RECV";
  39. default:
  40. return "<invalid>";
  41. }
  42. }
/*
 * Initialize a CT buffer descriptor prior to registering it with the GuC.
 * The whole descriptor is zeroed first (it is shared with the firmware),
 * then the buffer address, size and owner fields are filled in.
 */
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size, u32 owner)
{
	DRM_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
			 desc, cmds_addr, size, owner);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = owner;
}
  53. static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
  54. {
  55. DRM_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
  56. desc, desc->head, desc->tail);
  57. desc->head = 0;
  58. desc->tail = 0;
  59. desc->is_in_error = 0;
  60. }
  61. static int guc_action_register_ct_buffer(struct intel_guc *guc,
  62. u32 desc_addr,
  63. u32 type)
  64. {
  65. u32 action[] = {
  66. INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
  67. desc_addr,
  68. sizeof(struct guc_ct_buffer_desc),
  69. type
  70. };
  71. int err;
  72. /* Can't use generic send(), CT registration must go over MMIO */
  73. err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
  74. if (err)
  75. DRM_ERROR("CT: register %s buffer failed; err=%d\n",
  76. guc_ct_buffer_type_to_str(type), err);
  77. return err;
  78. }
  79. static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
  80. u32 owner,
  81. u32 type)
  82. {
  83. u32 action[] = {
  84. INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
  85. owner,
  86. type
  87. };
  88. int err;
  89. /* Can't use generic send(), CT deregistration must go over MMIO */
  90. err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
  91. if (err)
  92. DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
  93. guc_ct_buffer_type_to_str(type), owner, err);
  94. return err;
  95. }
  96. static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
  97. {
  98. return ctch->vma != NULL;
  99. }
/*
 * ctch_init - allocate and map the single backing page of a CT channel and
 * wire up the per-direction descriptor/cmds pointers.
 *
 * Return: 0 on success, negative errno on allocation/mapping failure
 * (everything acquired so far is released on the error path).
 */
static int ctch_init(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	struct i915_vma *vma;
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ctch->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	/* allocate vma */
	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_out;
	}
	ctch->vma = vma;

	/* map first page */
	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(blob)) {
		err = PTR_ERR(blob);
		goto err_vma;
	}
	DRM_DEBUG_DRIVER("CT: vma base=%#x\n", guc_ggtt_offset(ctch->vma));

	/* store pointers to desc and cmds */
	/* descriptors sit in the first half of the page, cmds in the second */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}

	return 0;

err_vma:
	i915_vma_unpin_and_release(&ctch->vma);
err_out:
	DRM_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
			 ctch->owner, err);
	return err;
}
/*
 * ctch_fini - undo ctch_init: unmap the blob page and release the vma.
 * Leaves ctch->vma NULL so ctch_is_open() reports the channel closed.
 */
static void ctch_fini(struct intel_guc *guc,
		      struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch->vma);
	/* unpin the mapping first, then drop the vma itself */
	i915_gem_object_unpin_map(ctch->vma->obj);
	i915_vma_unpin_and_release(&ctch->vma);
}
/*
 * ctch_open - (re)initialize the channel descriptors and register both
 * CT buffers with the GuC. Allocates the backing page on first open.
 *
 * Return: 0 on success, negative errno on failure (the channel is torn
 * back down on any error).
 */
static int ctch_open(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	u32 base;
	int err;
	int i;

	DRM_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
			 ctch->owner, yesno(ctch_is_open(ctch)));

	if (!ctch->vma) {
		err = ctch_init(guc, ctch);
		if (unlikely(err))
			goto err_out;
		GEM_BUG_ON(!ctch->vma);
	}

	/* vma should be already allocated and mapped */
	base = guc_ggtt_offset(ctch->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4,
					ctch->owner);
	}

	/* register buffers, starting with RECV buffer
	 * descriptors are in first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_fini;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	return 0;

err_deregister:
	/* SEND registration failed: take the already-registered RECV back */
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_fini:
	ctch_fini(guc, ctch);
err_out:
	DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
	return err;
}
/*
 * ctch_close - deregister both CT buffers from the GuC (SEND first, then
 * RECV — the reverse of the registration order) and free channel memory.
 */
static void ctch_close(struct intel_guc *guc,
		       struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch_is_open(ctch));

	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_SEND);
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
	ctch_fini(guc, ctch);
}
  226. static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
  227. {
  228. /* For now it's trivial */
  229. return ++ctch->next_fence;
  230. }
/*
 * ctb_write - append one message to the circular command buffer.
 * @ctb: command transport buffer to write into
 * @action: action code (in action[0]) followed by action data
 * @len: length of @action in dwords
 * @fence: fence value written into the message after the header
 *
 * The descriptor stores head/tail/size in bytes; all local arithmetic
 * below is in dwords. Only the tail is advanced here (the GuC moves head).
 *
 * Return: 0 on success, -ENOSPC if the message does not fit.
 */
static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	/* the byte-based descriptor fields must be dword aligned */
	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is a space including extra dw for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/* Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	/* action[0] is already encoded in the header; copy the payload */
	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);
	return 0;
}
/* Wait for the response from the GuC.
 * @desc: buffer descriptor whose fence/status fields are polled
 * @fence: response fence
 * @status: placeholder for status
 * return: 0 response received (status is valid)
 *	   -ETIMEDOUT no response within hardcoded timeout
 *	   -EPROTO no response, ct buffer was in error
 */
static int wait_for_response(struct guc_ct_buffer_desc *desc,
			     u32 fence,
			     u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to reset
			 * the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	/* status is copied out unconditionally; only valid when err == 0 */
	*status = desc->status;
	return err;
}
/*
 * ctch_send - write one request into the SEND buffer, ring the GuC
 * doorbell, and wait for the matching fenced response.
 * @status: out parameter, receives the GuC-reported status word
 *
 * Return: 0 on success; negative errno from the write or the wait;
 * -EIO when the GuC replied with a non-success status.
 */
static int ctch_send(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch,
		     const u32 *action,
		     u32 len,
		     u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 fence;
	int err;

	GEM_BUG_ON(!ctch_is_open(ctch));
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);

	fence = ctch_get_next_fence(ctch);
	err = ctb_write(ctb, action, len, fence);
	if (unlikely(err))
		return err;

	/* doorbell: tell the GuC there is a new message to process */
	intel_guc_notify(guc);

	err = wait_for_response(desc, fence, status);
	if (unlikely(err))
		return err;
	if (*status != INTEL_GUC_STATUS_SUCCESS)
		return -EIO;
	return 0;
}
  341. /*
  342. * Command Transport (CT) buffer based GuC send function.
  343. */
  344. static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len)
  345. {
  346. struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
  347. u32 status = ~0; /* undefined */
  348. int err;
  349. mutex_lock(&guc->send_mutex);
  350. err = ctch_send(guc, ctch, action, len, &status);
  351. if (unlikely(err)) {
  352. DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
  353. action[0], err, status);
  354. }
  355. mutex_unlock(&guc->send_mutex);
  356. return err;
  357. }
/**
 * intel_guc_enable_ct - enable buffer based command transport.
 * @guc: the guc
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_enable_ct(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
	int err;

	GEM_BUG_ON(!HAS_GUC_CT(dev_priv));

	err = ctch_open(guc, ctch);
	if (unlikely(err))
		return err;

	/* Switch into cmd transport buffer based send() */
	guc->send = intel_guc_send_ct;
	DRM_INFO("CT: %s\n", enableddisabled(true));
	return 0;
}
/**
 * intel_guc_disable_ct - disable buffer based command transport.
 * @guc: the guc
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 * A no-op when the channel was never opened.
 */
void intel_guc_disable_ct(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;

	GEM_BUG_ON(!HAS_GUC_CT(dev_priv));

	if (!ctch_is_open(ctch))
		return;

	ctch_close(guc, ctch);

	/* Disable send */
	guc->send = intel_guc_send_nop;
	DRM_INFO("CT: %s\n", enableddisabled(false));
}