intel_uc.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539
  1. /*
  2. * Copyright © 2016 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. */
  24. #include "i915_drv.h"
  25. #include "intel_uc.h"
  26. #include <linux/firmware.h>
  27. /* Cleans up uC firmware by releasing the firmware GEM obj.
  28. */
  29. static void __intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
  30. {
  31. struct drm_i915_gem_object *obj;
  32. obj = fetch_and_zero(&uc_fw->obj);
  33. if (obj)
  34. i915_gem_object_put(obj);
  35. uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
  36. }
  37. /* Reset GuC providing us with fresh state for both GuC and HuC.
  38. */
  39. static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
  40. {
  41. int ret;
  42. u32 guc_status;
  43. ret = intel_guc_reset(dev_priv);
  44. if (ret) {
  45. DRM_ERROR("GuC reset failed, ret = %d\n", ret);
  46. return ret;
  47. }
  48. guc_status = I915_READ(GUC_STATUS);
  49. WARN(!(guc_status & GS_MIA_IN_RESET),
  50. "GuC status: 0x%x, MIA core expected to be in reset\n",
  51. guc_status);
  52. return ret;
  53. }
  54. void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
  55. {
  56. if (!HAS_GUC(dev_priv)) {
  57. if (i915.enable_guc_loading > 0 ||
  58. i915.enable_guc_submission > 0)
  59. DRM_INFO("Ignoring GuC options, no hardware\n");
  60. i915.enable_guc_loading = 0;
  61. i915.enable_guc_submission = 0;
  62. return;
  63. }
  64. /* A negative value means "use platform default" */
  65. if (i915.enable_guc_loading < 0)
  66. i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
  67. /* Verify firmware version */
  68. if (i915.enable_guc_loading) {
  69. if (HAS_HUC_UCODE(dev_priv))
  70. intel_huc_select_fw(&dev_priv->huc);
  71. if (intel_guc_select_fw(&dev_priv->guc))
  72. i915.enable_guc_loading = 0;
  73. }
  74. /* Can't enable guc submission without guc loaded */
  75. if (!i915.enable_guc_loading)
  76. i915.enable_guc_submission = 0;
  77. /* A negative value means "use platform default" */
  78. if (i915.enable_guc_submission < 0)
  79. i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
  80. }
/*
 * Ring the GuC doorbell: write GUC_SEND_TRIGGER to the send-interrupt
 * register to tell the GuC firmware that a new request is available.
 * Installed as guc->notify in intel_uc_init_early().
 */
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}
  86. void intel_uc_init_early(struct drm_i915_private *dev_priv)
  87. {
  88. struct intel_guc *guc = &dev_priv->guc;
  89. intel_guc_ct_init_early(&guc->ct);
  90. mutex_init(&guc->send_mutex);
  91. guc->send = intel_guc_send_nop;
  92. guc->notify = gen8_guc_raise_irq;
  93. }
  94. static void fetch_uc_fw(struct drm_i915_private *dev_priv,
  95. struct intel_uc_fw *uc_fw)
  96. {
  97. struct pci_dev *pdev = dev_priv->drm.pdev;
  98. struct drm_i915_gem_object *obj;
  99. const struct firmware *fw = NULL;
  100. struct uc_css_header *css;
  101. size_t size;
  102. int err;
  103. if (!uc_fw->path)
  104. return;
  105. uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
  106. DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
  107. intel_uc_fw_status_repr(uc_fw->fetch_status));
  108. err = request_firmware(&fw, uc_fw->path, &pdev->dev);
  109. if (err)
  110. goto fail;
  111. if (!fw)
  112. goto fail;
  113. DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
  114. uc_fw->path, fw);
  115. /* Check the size of the blob before examining buffer contents */
  116. if (fw->size < sizeof(struct uc_css_header)) {
  117. DRM_NOTE("Firmware header is missing\n");
  118. goto fail;
  119. }
  120. css = (struct uc_css_header *)fw->data;
  121. /* Firmware bits always start from header */
  122. uc_fw->header_offset = 0;
  123. uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
  124. css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
  125. if (uc_fw->header_size != sizeof(struct uc_css_header)) {
  126. DRM_NOTE("CSS header definition mismatch\n");
  127. goto fail;
  128. }
  129. /* then, uCode */
  130. uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
  131. uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
  132. /* now RSA */
  133. if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
  134. DRM_NOTE("RSA key size is bad\n");
  135. goto fail;
  136. }
  137. uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
  138. uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
  139. /* At least, it should have header, uCode and RSA. Size of all three. */
  140. size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
  141. if (fw->size < size) {
  142. DRM_NOTE("Missing firmware components\n");
  143. goto fail;
  144. }
  145. /*
  146. * The GuC firmware image has the version number embedded at a
  147. * well-known offset within the firmware blob; note that major / minor
  148. * version are TWO bytes each (i.e. u16), although all pointers and
  149. * offsets are defined in terms of bytes (u8).
  150. */
  151. switch (uc_fw->type) {
  152. case INTEL_UC_FW_TYPE_GUC:
  153. /* Header and uCode will be loaded to WOPCM. Size of the two. */
  154. size = uc_fw->header_size + uc_fw->ucode_size;
  155. /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
  156. if (size > intel_guc_wopcm_size(dev_priv)) {
  157. DRM_ERROR("Firmware is too large to fit in WOPCM\n");
  158. goto fail;
  159. }
  160. uc_fw->major_ver_found = css->guc.sw_version >> 16;
  161. uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
  162. break;
  163. case INTEL_UC_FW_TYPE_HUC:
  164. uc_fw->major_ver_found = css->huc.sw_version >> 16;
  165. uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
  166. break;
  167. default:
  168. DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
  169. err = -ENOEXEC;
  170. goto fail;
  171. }
  172. if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
  173. DRM_NOTE("Skipping %s firmware version check\n",
  174. intel_uc_fw_type_repr(uc_fw->type));
  175. } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
  176. uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
  177. DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
  178. intel_uc_fw_type_repr(uc_fw->type),
  179. uc_fw->major_ver_found, uc_fw->minor_ver_found,
  180. uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
  181. err = -ENOEXEC;
  182. goto fail;
  183. }
  184. DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
  185. uc_fw->major_ver_found, uc_fw->minor_ver_found,
  186. uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
  187. obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
  188. if (IS_ERR(obj)) {
  189. err = PTR_ERR(obj);
  190. goto fail;
  191. }
  192. uc_fw->obj = obj;
  193. uc_fw->size = fw->size;
  194. DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
  195. uc_fw->obj);
  196. release_firmware(fw);
  197. uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
  198. return;
  199. fail:
  200. DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
  201. uc_fw->path, err);
  202. DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
  203. err, fw, uc_fw->obj);
  204. release_firmware(fw); /* OK even if fw is NULL */
  205. uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
  206. }
  207. void intel_uc_init_fw(struct drm_i915_private *dev_priv)
  208. {
  209. fetch_uc_fw(dev_priv, &dev_priv->huc.fw);
  210. fetch_uc_fw(dev_priv, &dev_priv->guc.fw);
  211. }
  212. void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
  213. {
  214. __intel_uc_fw_fini(&dev_priv->guc.fw);
  215. __intel_uc_fw_fini(&dev_priv->huc.fw);
  216. }
  217. static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
  218. {
  219. GEM_BUG_ON(!guc->send_regs.base);
  220. GEM_BUG_ON(!guc->send_regs.count);
  221. GEM_BUG_ON(i >= guc->send_regs.count);
  222. return _MMIO(guc->send_regs.base + 4 * i);
  223. }
  224. static void guc_init_send_regs(struct intel_guc *guc)
  225. {
  226. struct drm_i915_private *dev_priv = guc_to_i915(guc);
  227. enum forcewake_domains fw_domains = 0;
  228. unsigned int i;
  229. guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
  230. guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
  231. for (i = 0; i < guc->send_regs.count; i++) {
  232. fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
  233. guc_send_reg(guc, i),
  234. FW_REG_READ | FW_REG_WRITE);
  235. }
  236. guc->send_regs.fw_domains = fw_domains;
  237. }
  238. static void guc_capture_load_err_log(struct intel_guc *guc)
  239. {
  240. if (!guc->log.vma || i915.guc_log_level < 0)
  241. return;
  242. if (!guc->load_err_log)
  243. guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
  244. return;
  245. }
  246. static void guc_free_load_err_log(struct intel_guc *guc)
  247. {
  248. if (guc->load_err_log)
  249. i915_gem_object_put(guc->load_err_log);
  250. }
  251. static int guc_enable_communication(struct intel_guc *guc)
  252. {
  253. struct drm_i915_private *dev_priv = guc_to_i915(guc);
  254. guc_init_send_regs(guc);
  255. if (HAS_GUC_CT(dev_priv))
  256. return intel_guc_enable_ct(guc);
  257. guc->send = intel_guc_send_mmio;
  258. return 0;
  259. }
  260. static void guc_disable_communication(struct intel_guc *guc)
  261. {
  262. struct drm_i915_private *dev_priv = guc_to_i915(guc);
  263. if (HAS_GUC_CT(dev_priv))
  264. intel_guc_disable_ct(guc);
  265. guc->send = intel_guc_send_nop;
  266. }
  267. int intel_uc_init_hw(struct drm_i915_private *dev_priv)
  268. {
  269. struct intel_guc *guc = &dev_priv->guc;
  270. int ret, attempts;
  271. if (!i915.enable_guc_loading)
  272. return 0;
  273. guc_disable_communication(guc);
  274. gen9_reset_guc_interrupts(dev_priv);
  275. /* We need to notify the guc whenever we change the GGTT */
  276. i915_ggtt_enable_guc(dev_priv);
  277. if (i915.enable_guc_submission) {
  278. /*
  279. * This is stuff we need to have available at fw load time
  280. * if we are planning to enable submission later
  281. */
  282. ret = i915_guc_submission_init(dev_priv);
  283. if (ret)
  284. goto err_guc;
  285. }
  286. /* init WOPCM */
  287. I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
  288. I915_WRITE(DMA_GUC_WOPCM_OFFSET,
  289. GUC_WOPCM_OFFSET_VALUE | HUC_LOADING_AGENT_GUC);
  290. /* WaEnableuKernelHeaderValidFix:skl */
  291. /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
  292. if (IS_GEN9(dev_priv))
  293. attempts = 3;
  294. else
  295. attempts = 1;
  296. while (attempts--) {
  297. /*
  298. * Always reset the GuC just before (re)loading, so
  299. * that the state and timing are fairly predictable
  300. */
  301. ret = __intel_uc_reset_hw(dev_priv);
  302. if (ret)
  303. goto err_submission;
  304. intel_huc_init_hw(&dev_priv->huc);
  305. ret = intel_guc_init_hw(&dev_priv->guc);
  306. if (ret == 0 || ret != -EAGAIN)
  307. break;
  308. DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
  309. "retry %d more time(s)\n", ret, attempts);
  310. }
  311. /* Did we succeded or run out of retries? */
  312. if (ret)
  313. goto err_log_capture;
  314. ret = guc_enable_communication(guc);
  315. if (ret)
  316. goto err_log_capture;
  317. intel_guc_auth_huc(dev_priv);
  318. if (i915.enable_guc_submission) {
  319. if (i915.guc_log_level >= 0)
  320. gen9_enable_guc_interrupts(dev_priv);
  321. ret = i915_guc_submission_enable(dev_priv);
  322. if (ret)
  323. goto err_interrupts;
  324. }
  325. return 0;
  326. /*
  327. * We've failed to load the firmware :(
  328. *
  329. * Decide whether to disable GuC submission and fall back to
  330. * execlist mode, and whether to hide the error by returning
  331. * zero or to return -EIO, which the caller will treat as a
  332. * nonfatal error (i.e. it doesn't prevent driver load, but
  333. * marks the GPU as wedged until reset).
  334. */
  335. err_interrupts:
  336. guc_disable_communication(guc);
  337. gen9_disable_guc_interrupts(dev_priv);
  338. err_log_capture:
  339. guc_capture_load_err_log(guc);
  340. err_submission:
  341. if (i915.enable_guc_submission)
  342. i915_guc_submission_fini(dev_priv);
  343. err_guc:
  344. i915_ggtt_disable_guc(dev_priv);
  345. DRM_ERROR("GuC init failed\n");
  346. if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
  347. ret = -EIO;
  348. else
  349. ret = 0;
  350. if (i915.enable_guc_submission) {
  351. i915.enable_guc_submission = 0;
  352. DRM_NOTE("Falling back from GuC submission to execlist mode\n");
  353. }
  354. i915.enable_guc_loading = 0;
  355. DRM_NOTE("GuC firmware loading disabled\n");
  356. return ret;
  357. }
  358. void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
  359. {
  360. guc_free_load_err_log(&dev_priv->guc);
  361. if (!i915.enable_guc_loading)
  362. return;
  363. if (i915.enable_guc_submission)
  364. i915_guc_submission_disable(dev_priv);
  365. guc_disable_communication(&dev_priv->guc);
  366. if (i915.enable_guc_submission) {
  367. gen9_disable_guc_interrupts(dev_priv);
  368. i915_guc_submission_fini(dev_priv);
  369. }
  370. i915_ggtt_disable_guc(dev_priv);
  371. }
/*
 * Placeholder send hook installed while GuC communication is disabled;
 * any call indicates a driver bug, so warn loudly and fail the send.
 */
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}
/*
 * This function implements the MMIO based host to GuC interface: the
 * action payload is written to consecutive scratch registers, the GuC
 * doorbell is rung, and the first register is polled for the response.
 * Serialised by guc->send_mutex; forcewake is held across the exchange.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
		*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);

	/* Stage the whole payload in the scratch registers */
	for (i = 0; i < len; i++)
		I915_WRITE(guc_send_reg(guc, i), action[i]);

	/* Posting read flushes the writes before the doorbell is rung */
	POSTING_READ(guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(dev_priv,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_RECV_MASK,
					   INTEL_GUC_RECV_MASK,
					   10, 10, &status);
	if (status != INTEL_GUC_STATUS_SUCCESS) {
		/*
		 * Either the GuC explicitly returned an error (which
		 * we convert to -EIO here) or no response at all was
		 * received within the timeout limit (-ETIMEDOUT)
		 */
		if (ret != -ETIMEDOUT)
			ret = -EIO;

		DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
			 " ret=%d status=0x%08X response=0x%08X\n",
			 action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
	}

	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
  423. int intel_guc_sample_forcewake(struct intel_guc *guc)
  424. {
  425. struct drm_i915_private *dev_priv = guc_to_i915(guc);
  426. u32 action[2];
  427. action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
  428. /* WaRsDisableCoarsePowerGating:skl,bxt */
  429. if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
  430. action[1] = 0;
  431. else
  432. /* bit 0 and 1 are for Render and Media domain separately */
  433. action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
  434. return intel_guc_send(guc, action, ARRAY_SIZE(action));
  435. }