intel_guc_loader.c

/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Vinit Azad <vinit.azad@intel.com>
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Dave Gordon <david.s.gordon@intel.com>
 *    Alex Dai <yu.dai@intel.com>
 */
#include <linux/firmware.h>
#include "i915_drv.h"
#include "intel_guc.h"
/**
 * DOC: GuC-specific firmware loader
 *
 * intel_guc:
 * Top level structure of the GuC. It handles firmware loading and manages the
 * client pool and doorbells. intel_guc owns an i915_guc_client to replace the
 * legacy ExecList submission.
 *
 * Firmware versioning:
 * The firmware build process generates a version header file with the major
 * and minor version defined. The versions are built into the CSS header of
 * the firmware. The i915 kernel driver sets the minimum firmware version
 * required per platform. The firmware installation package will install
 * (symbolically link) the proper version of firmware.
 *
 * GuC address space:
 * The GuC does not allow any gfx GGTT address that falls into the range
 * [0, WOPCM_TOP), which is reserved for Boot ROM, SRAM and WOPCM. Currently
 * this top address is 512K. In order to exclude the 0-512K address space from
 * the GGTT, all gfx objects used by the GuC are pinned with PIN_OFFSET_BIAS
 * set to the size of the WOPCM.
 *
 * Firmware log:
 * The firmware log is enabled by setting i915.guc_log_level to a non-negative
 * level. Log data is printed out by reading the debugfs file
 * i915_guc_log_dump. Reading from i915_guc_load_status will print out the
 * firmware loading status and scratch register values.
 */
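
/*
 * Names of the firmware blobs requested below. The "verX_Y" suffix in each
 * filename matches the major/minor version that intel_guc_init() records as
 * the wanted version for that platform.
 */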
#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);

#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
MODULE_FIRMWARE(I915_BXT_GUC_UCODE);

/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
	switch (status) {
	case GUC_FIRMWARE_FAIL:
		return "FAIL";
	case GUC_FIRMWARE_NONE:
		return "NONE";
	case GUC_FIRMWARE_PENDING:
		return "PENDING";
	case GUC_FIRMWARE_SUCCESS:
		return "SUCCESS";
	default:
		return "UNKNOWN!";
	}
}
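
/*
 * The next two functions steer the GT engine interrupts either to the host
 * (the default, and what we fall back to on any failure) or to the GuC
 * (used once GuC submission has been successfully enabled).
 */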
static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	int irqs;

	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);
}

static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	int irqs;
	u32 tmp;

	/* tell all command streamers to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);

	/*
	 * If the GuC has routed PM interrupts to itself, keep whatever other
	 * interrupts it has left unmasked, but don't keep the redirect bit
	 * itself in pm_intr_keep.
	 */
	tmp = I915_READ(GEN6_PMINTRMSK);
	if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
		dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
		dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
	}
}

static u32 get_gttype(struct drm_i915_private *dev_priv)
{
	/* XXX: GT type based on PCI device ID? field seems unused by fw */
	return 0;
}

static u32 get_core_family(struct drm_i915_private *dev_priv)
{
	switch (INTEL_INFO(dev_priv)->gen) {
	case 9:
		return GFXCORE_FAMILY_GEN9;
	default:
		DRM_ERROR("GUC: unsupported core family\n");
		return GFXCORE_FAMILY_UNKNOWN;
	}
}
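
/*
 * Initialise the boot parameters that the GuC reads at startup: they are
 * staged in a local array and then written out to the SOFT_SCRATCH registers,
 * from which the GuC firmware picks them up.
 */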
static void set_guc_init_params(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(&params, 0, sizeof(params));

	params[GUC_CTL_DEVICE_INFO] |=
		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);

	/*
	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
	 * second. This ARAT value is calculated by:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
			GUC_CTL_VCS2_ENABLED;

	if (i915.guc_log_level >= 0) {
		params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
		params[GUC_CTL_DEBUG] =
			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
	}

	if (guc->ads_obj) {
		u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
				>> PAGE_SHIFT;
		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
	}

	/* If GuC submission is enabled, set up additional parameters here */
	if (i915.enable_guc_submission) {
		u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;

		pgs >>= PAGE_SHIFT;
		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);

		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;

		/* Unmask this bit to enable the GuC's internal scheduler */
		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
	}

	I915_WRITE(SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
}

/*
 * Read the GuC status register (GUC_STATUS) and store it in the
 * specified location; then return a boolean indicating whether
 * the value matches either of two values representing completion
 * of the GuC boot process.
 *
 * This is used for polling the GuC status in a wait_for()
 * loop below.
 */
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
				      u32 *status)
{
	u32 val = I915_READ(GUC_STATUS);
	u32 uk_val = val & GS_UKERNEL_MASK;

	*status = val;
	return (uk_val == GS_UKERNEL_READY ||
		((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
}

/*
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Architecturally, the DMA engine is bidirectional, and can potentially even
 * transfer between GTT locations. This functionality is left out of the API
 * for now as there is no need for it.
 *
 * Note that GuC needs the CSS header plus uKernel code to be copied by the
 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
 */
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
	unsigned long offset;
	struct sg_table *sg = fw_obj->pages;
	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
	int i, ret = 0;

	/* where RSA signature starts */
	offset = guc_fw->rsa_offset;

	/* Copy RSA signature from the fw image to HW for verification */
	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);

	/* The header plus uCode will be copied to WOPCM via DMA, excluding any
	 * other components */
	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);

	/* Set the source address for the new blob */
	offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/*
	 * Set the DMA destination. Current uCode expects the code to be
	 * loaded at 8k; locations below this are used for the stack.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	/*
	 * Wait for the DMA to complete & the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms, so a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver will attempt to fall back to
	 * execlist mode if this happens.)
	 */
	ret = wait_for(guc_ucode_response(dev_priv, &status), 100);

	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
			I915_READ(DMA_CTRL), status);

	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	DRM_DEBUG_DRIVER("returning %d\n", ret);
	return ret;
}

static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
{
	u32 wopcm_size = GUC_WOPCM_TOP;

	/* On BXT, the top of WOPCM is reserved for RC6 context */
	if (IS_BROXTON(dev_priv))
		wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;

	return wopcm_size;
}

/*
 * Load the GuC firmware blob into the MinuteIA.
 */
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct drm_device *dev = dev_priv->dev;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
	if (ret) {
		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
		return ret;
	}

	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* init WOPCM */
	I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
	I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);

	/* Enable MIA caching. GuC clock gating is disabled. */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

	/* WaDisableMinuteIaClockGating:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
					      ~GUC_ENABLE_MIA_CLOCK_GATING));
	}

	/* WaC6DisallowByGfxPause */
	I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

	if (IS_BROXTON(dev))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}

	set_guc_init_params(dev_priv);

	ret = guc_ucode_xfer_dma(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/*
	 * We keep the object pages for reuse during resume. But we can unpin it
	 * now that DMA has completed, so it doesn't continue to take up space.
	 */
	i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);

	return ret;
}
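
/*
 * Reset the GuC via intel_guc_reset() and check that the MIA core actually
 * reports being in reset before the caller attempts a fresh firmware load.
 */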
static int i915_reset_guc(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 guc_status;

	ret = intel_guc_reset(dev_priv);
	if (ret) {
		DRM_ERROR("GuC reset failed, ret = %d\n", ret);
		return ret;
	}

	guc_status = I915_READ(GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);

	return ret;
}

/**
 * intel_guc_setup() - finish preparing the GuC for activity
 * @dev: drm device
 *
 * Called from gem_init_hw() during driver loading and also after a GPU reset.
 *
 * The main action required here is to load the GuC uCode into the device.
 * The firmware image should have already been fetched into memory by the
 * earlier call to intel_guc_init(), so here we need only check that the fetch
 * succeeded, and then transfer the image to the h/w.
 *
 * Return: non-zero code on error
 */
int intel_guc_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	const char *fw_path = guc_fw->guc_fw_path;
	int retries, ret, err;

	DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
		fw_path,
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	/* Loading forbidden, or no firmware to load? */
	if (!i915.enable_guc_loading) {
		err = 0;
		goto fail;
	} else if (fw_path == NULL || *fw_path == '\0') {
		/* Don't dereference a NULL path; an empty ("") path means the
		 * platform has a GuC but no firmware is known for it. */
		if (fw_path != NULL)
			DRM_INFO("No GuC firmware known for this platform\n");
		err = -ENODEV;
		goto fail;
	}

	/* Fetch failed, or already fetched but failed to load? */
	if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
		err = -EIO;
		goto fail;
	} else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
		err = -ENOEXEC;
		goto fail;
	}

	direct_interrupts_to_host(dev_priv);

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	err = i915_guc_submission_init(dev);
	if (err)
		goto fail;

	/*
	 * WaEnableuKernelHeaderValidFix:skl,bxt
	 * For BXT, this is only needed up to stepping B0, but the WA below is
	 * required for later steppings as well, so it is applied to all.
	 */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt */
	for (retries = 3; ; ) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		err = i915_reset_guc(dev_priv);
		if (err) {
			DRM_ERROR("GuC reset failed: %d\n", err);
			goto fail;
		}

		err = guc_ucode_xfer(dev_priv);
		if (!err)
			break;

		if (--retries == 0)
			goto fail;

		DRM_INFO("GuC fw load failed: %d; will reset and "
			 "retry %d more time(s)\n", err, retries);
	}

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	if (i915.enable_guc_submission) {
		/* The execbuf_client will be recreated. Release it first. */
		i915_guc_submission_disable(dev);

		err = i915_guc_submission_enable(dev);
		if (err)
			goto fail;
		direct_interrupts_to_guc(dev_priv);
	}

	return 0;

fail:
	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;

	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_disable(dev);
	i915_guc_submission_fini(dev);

	/*
	 * We've failed to load the firmware :(
	 *
	 * Decide whether to disable GuC submission and fall back to
	 * execlist mode, and whether to hide the error by returning
	 * zero or to return -EIO, which the caller will treat as a
	 * nonfatal error (i.e. it doesn't prevent driver load, but
	 * marks the GPU as wedged until reset).
	 */
	if (i915.enable_guc_loading > 1) {
		ret = -EIO;
	} else if (i915.enable_guc_submission > 1) {
		ret = -EIO;
	} else {
		ret = 0;
	}

	if (err == 0)
		DRM_INFO("GuC firmware load skipped\n");
	else if (ret == -EIO)
		DRM_ERROR("GuC firmware load failed: %d\n", err);
	else
		DRM_INFO("GuC firmware load failed: %d\n", err);

	if (i915.enable_guc_submission) {
		if (fw_path == NULL)
			DRM_INFO("GuC submission without firmware not supported\n");
		if (ret == 0)
			DRM_INFO("Falling back to execlist mode\n");
		else
			DRM_ERROR("GuC init failed: %d\n", ret);
	}
	i915.enable_guc_submission = 0;

	return ret;
}
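
/*
 * Fetch the raw firmware blob with request_firmware(), validate the CSS
 * header layout and the embedded version against what the platform wants,
 * then copy the blob into a GEM object and record the header/uCode/RSA
 * offsets and sizes for the later DMA transfer.
 */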
static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
{
	struct drm_i915_gem_object *obj;
	const struct firmware *fw;
	struct guc_css_header *css;
	size_t size;
	int err;

	DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));

	err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
	if (err)
		goto fail;
	if (!fw)
		goto fail;

	DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
		guc_fw->guc_fw_path, fw);

	/* Check the size of the blob before examining buffer contents */
	if (fw->size < sizeof(struct guc_css_header)) {
		DRM_ERROR("Firmware header is missing\n");
		goto fail;
	}

	css = (struct guc_css_header *)fw->data;

	/* Firmware bits always start from header */
	guc_fw->header_offset = 0;
	guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
		css->key_size_dw - css->exponent_size_dw) * sizeof(u32);

	if (guc_fw->header_size != sizeof(struct guc_css_header)) {
		DRM_ERROR("CSS header definition mismatch\n");
		goto fail;
	}

	/* then, uCode */
	guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
	guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
		DRM_ERROR("RSA key size is bad\n");
		goto fail;
	}
	guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
	guc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* At least, it should have header, uCode and RSA. Size of all three. */
	size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
	if (fw->size < size) {
		DRM_ERROR("Missing firmware components\n");
		goto fail;
	}

	/* Header and uCode will be loaded to WOPCM. Size of the two. */
	size = guc_fw->header_size + guc_fw->ucode_size;
	if (size > guc_wopcm_size(dev->dev_private)) {
		DRM_ERROR("Firmware is too large to fit in WOPCM\n");
		goto fail;
	}

	/*
	 * The GuC firmware image has the version number embedded at a well-known
	 * offset within the firmware blob; note that major / minor version are
	 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
	 * in terms of bytes (u8).
	 */
	guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
	guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;

	if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
	    guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
		DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
		err = -ENOEXEC;
		goto fail;
	}

	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR_OR_NULL(obj)) {
		err = obj ? PTR_ERR(obj) : -ENOMEM;
		goto fail;
	}

	guc_fw->guc_fw_obj = obj;
	guc_fw->guc_fw_size = fw->size;

	DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
			guc_fw->guc_fw_obj);

	release_firmware(fw);

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
	return;

fail:
	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
		err, fw, guc_fw->guc_fw_obj);
	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
		  guc_fw->guc_fw_path, err);

	mutex_lock(&dev->struct_mutex);
	obj = guc_fw->guc_fw_obj;
	if (obj)
		drm_gem_object_unreference(&obj->base);
	guc_fw->guc_fw_obj = NULL;
	mutex_unlock(&dev->struct_mutex);

	release_firmware(fw);	/* OK even if fw is NULL */
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
}

/**
 * intel_guc_init() - define parameters and fetch firmware
 * @dev: drm device
 *
 * Called early during driver load, but after GEM is initialised.
 *
 * The firmware will be transferred to the GuC's memory later,
 * when intel_guc_setup() is called.
 */
void intel_guc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	const char *fw_path;

	/* A negative value means "use platform default" */
	if (i915.enable_guc_loading < 0)
		i915.enable_guc_loading = HAS_GUC_UCODE(dev);
	if (i915.enable_guc_submission < 0)
		i915.enable_guc_submission = HAS_GUC_SCHED(dev);

	if (!HAS_GUC_UCODE(dev)) {
		fw_path = NULL;
	} else if (IS_SKYLAKE(dev)) {
		fw_path = I915_SKL_GUC_UCODE;
		guc_fw->guc_fw_major_wanted = 6;
		guc_fw->guc_fw_minor_wanted = 1;
	} else if (IS_BROXTON(dev)) {
		fw_path = I915_BXT_GUC_UCODE;
		guc_fw->guc_fw_major_wanted = 8;
		guc_fw->guc_fw_minor_wanted = 7;
	} else {
		fw_path = "";	/* unknown device */
	}

	guc_fw->guc_dev = dev;
	guc_fw->guc_fw_path = fw_path;
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
	guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;

	/* Early (and silent) return if GuC loading is disabled */
	if (!i915.enable_guc_loading)
		return;
	if (fw_path == NULL)
		return;
	if (*fw_path == '\0')
		return;

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
	guc_fw_fetch(dev, guc_fw);
	/* status must now be FAIL or SUCCESS */
}

/**
 * intel_guc_fini() - clean up all allocated resources
 * @dev: drm device
 */
void intel_guc_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;

	mutex_lock(&dev->struct_mutex);
	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_disable(dev);
	i915_guc_submission_fini(dev);

	if (guc_fw->guc_fw_obj)
		drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
	guc_fw->guc_fw_obj = NULL;
	mutex_unlock(&dev->struct_mutex);

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
}