intel_engine_cs.c

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);

	u8 uabi_class;
};
static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	u32 mmio_base;
	unsigned irq_shift;
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
	},
};
/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			/* fall through */
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}
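/*
 * Worked example (from the HSW note at the top of this file): the usable
 * Haswell context state is 66944 bytes, and round_up(66944, 4096) is
 * 17 * 4096 = 69632 bytes - hence HSW_CXT_TOTAL_SIZE of 17 pages. On
 * gen6/gen7 the CXT_SIZE registers report the size in 64-byte units
 * (hence the * 64 above) before the result is rounded up to whole pages.
 */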
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	const struct engine_class_info *class_info;
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
	class_info = &intel_engine_classes[info->class];

	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
			 class_info->name, info->instance) >=
		sizeof(engine->name));
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_id = info->uabi_id;
	engine->uabi_class = class_info->uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	spin_lock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;
	return 0;
}
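/*
 * Illustrative sketch (not part of the driver): intel_engine_setup()
 * fills in both dev_priv->engine[] (indexed by global id) and
 * dev_priv->engine_class[][] (indexed by class/instance), so a
 * hypothetical helper could resolve an engine from its class/instance
 * pair like so:
 */
#if 0
static struct intel_engine_cs *
example_engine_lookup(struct drm_i915_private *i915, u8 class, u8 instance)
{
	/* bounds mirror the GEM_WARN_ON checks in intel_engine_setup() */
	if (class > MAX_ENGINE_CLASS || instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}
#endif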
/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	i915_check_and_clear_faults(dev_priv);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}
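/*
 * For example (illustrative): on a part exposing only RCS, BCS and VCS,
 * ring_mask is BIT(RCS) | BIT(BCS) | BIT(VCS). Each successful
 * intel_engine_setup() call above ORs ENGINE_MASK(i) == BIT(i) into
 * 'mask', the WARN_ON(mask != ring_mask) cross-checks the two, and
 * num_rings ends up as hweight32(mask) == 3.
 */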
/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
static bool csb_force_mmio(struct drm_i915_private *i915)
{
	/*
	 * IOMMU adds unpredictable latency causing the CSB write (from the
	 * GPU into the HWSP) to only be visible some time after the interrupt
	 * (missed breadcrumb syndrome).
	 */
	if (intel_vtd_active())
		return true;

	/* Older GVT emulation depends upon intercepting CSB mmio */
	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
		return true;

	return false;
}

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->csb_use_mmio = csb_force_mmio(engine->i915);

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue = RB_ROOT;
	execlists->first = NULL;
}
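/*
 * Note on port_mask: the number of execlist ports is port_mask + 1 (which
 * is what execlists_num_ports() reports), and it must be a power of two,
 * hence the BUILD_BUG_ON above. The default port_mask of 1 therefore
 * describes the usual two-port ELSP.
 */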
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	intel_engine_init_execlist(engine);
	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
}
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}
static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}
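/*
 * Two HWS flavours, in short: platforms flagged HWS_NEEDS_PHYSICAL (older
 * parts whose status-page register takes a physical address rather than a
 * GGTT offset) use the DMA-coherent page allocated above, while everything
 * else pins a regular GEM object into the GGTT via init_status_page().
 */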
/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ring = engine->context_pin(engine, engine->i915->kernel_context);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
		ring = engine->context_pin(engine,
					   engine->i915->preempt_context);
		if (IS_ERR(ring)) {
			ret = PTR_ERR(ring);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ret = init_phys_status_page(engine);
	else
		ret = init_status_page(engine);
	if (ret)
		goto err_breadcrumbs;

	return 0;

err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
		engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
		engine->context_unpin(engine, engine->i915->preempt_context);
	engine->context_unpin(engine, engine->i915->kernel_context);
}
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}
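/*
 * Why the dance above: registers such as GEN7_SAMPLER_INSTDONE and
 * GEN7_ROW_INSTDONE are multicast - each slice/subslice has its own copy -
 * so a read must first be steered to one specific unit through
 * GEN8_MCR_SELECTOR, under forcewake and the uncore lock, and the selector
 * must be restored to 0 afterwards as the hardware expects.
 */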
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}
static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))
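/*
 * These helpers target "masked" registers, where the top 16 bits of a
 * write act as a per-bit write-enable for the bottom 16 bits:
 * _MASKED_BIT_ENABLE(m) expands (via _MASKED_FIELD) to (m << 16 | m) and
 * _MASKED_BIT_DISABLE(m) to (m << 16), so only the selected bits are
 * touched and the rest of the register is left unchanged.
 */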
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		   i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}
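/*
 * Illustrative sketch (a hypothetical platform hook, not part of the
 * driver): how the WA_* helpers and wa_ring_whitelist_reg() combine in
 * the per-platform *_init_workarounds() functions below. The register
 * and bit names are reused from elsewhere in this file.
 */
#if 0
static int example_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/*
	 * Queue a masked-register write; it is recorded in
	 * dev_priv->workarounds and only applied later, from
	 * intel_ring_workarounds_emit().
	 */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* Let unprivileged batches access a normally privileged register */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return 0;
}
#endif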
static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}
static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}
static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
		   _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   ECOCHK_DIS_TLB);

	if (HAS_LLC(dev_priv)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);

		I915_WRITE(MMCD_MISC_CTRL,
			   I915_READ(MMCD_MISC_CTRL) |
			   MMCD_PCLA |
			   MMCD_HOTSPOT_EN);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_COFFEELAKE(dev_priv))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
	if (IS_GEN9_LP(dev_priv)) {
		u32 val = I915_READ(GEN8_L3SQCREG1);

		val &= ~L3_PRIO_CREDITS_MASK;
		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
		I915_WRITE(GEN8_L3SQCREG1, val);
	}

	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining the old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}
static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}
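/*
 * Worked example (illustrative): if slice 1 reports subslice_7eu[1] ==
 * BIT(2), it passes the is_power_of_2() check, ffs(BIT(2)) - 1 == 2
 * gives ss == 2, and vals[1] = 3 - 2 = 1 is the hashing value then
 * programmed for that slice via GEN9_IZ_HASHING(1, vals[1]).
 */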
static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableGafsUnitClkGating:skl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
			   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	I915_WRITE(FF_SLICE_CS_CHICKEN2,
		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	return 0;
}

static int cnl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaInPlaceDecompressionHang:cnl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaDisableEarlyEOT:cnl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);

	return 0;
}
static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:kbl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));

	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:kbl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return 0;
}

static int glk_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
	if (ret)
		return ret;

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	return 0;
}

static int cfl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:cfl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:cfl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	return 0;
}
int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int err;

	WARN_ON(engine->id != RCS);

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;

	if (IS_BROADWELL(dev_priv))
		err = bdw_init_workarounds(engine);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_init_workarounds(engine);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_init_workarounds(engine);
	else if (IS_BROXTON(dev_priv))
		err = bxt_init_workarounds(engine);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_init_workarounds(engine);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_init_workarounds(engine);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_init_workarounds(engine);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_init_workarounds(engine);
	else
		err = 0;
	if (err)
		return err;

	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
			 engine->name, dev_priv->workarounds.count);
	return 0;
}
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct i915_workarounds *w = &req->i915->workarounds;
	u32 *cs;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, w->count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
	for (i = 0; i < w->count; i++) {
		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
		*cs++ = w->reg[i].value;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
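/*
 * For example (illustrative), with w->count == 2 the ring allocation of
 * w->count * 2 + 2 == 6 dwords is filled as:
 *
 *	MI_LOAD_REGISTER_IMM(2)
 *	reg[0] offset, reg[0] value
 *	reg[1] offset, reg[1] value
 *	MI_NOOP
 *
 * bracketed by the two EMIT_BARRIER flushes above.
 */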
static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	intel_runtime_pm_get(dev_priv);

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Interrupt/tasklet pending? */
	if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
		return false;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active))
		return false;

	/* ELSP is empty, but there are ready requests? */
	if (READ_ONCE(engine->execlists.first))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}
  1240. bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
  1241. {
  1242. struct intel_engine_cs *engine;
  1243. enum intel_engine_id id;
  1244. /*
  1245. * If the driver is wedged, HW state may be very inconsistent and
  1246. * report that it is still busy, even though we have stopped using it.
  1247. */
  1248. if (i915_terminally_wedged(&dev_priv->gpu_error))
  1249. return true;
  1250. for_each_engine(engine, dev_priv, id) {
  1251. if (!intel_engine_is_idle(engine))
  1252. return false;
  1253. }
  1254. return true;
  1255. }

/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context to be executed on this engine (or, if the
 * engine is already idle, the last context that was executed) is the kernel
 * context (#i915.kernel_context).
 */
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	const struct i915_gem_context * const kernel_context =
		engine->i915->kernel_context;
	struct drm_i915_gem_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_gem_active_peek(&engine->timeline->last_request);
	if (rq)
		return rq->ctx == kernel_context;
	else
		return engine->last_retired_context == kernel_context;
}
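
/*
 * Reinstall each engine's default submission backend via
 * engine->set_default_submission(), undoing any temporary override
 * (e.g. the nop submission installed while handling a reset).
 */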
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p, NULL);
		}

		if (engine->park)
			engine->park(engine);

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}

/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
 */
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		if (engine->unpark)
			engine->unpark(engine);
	}
}
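
/*
 * Report whether this engine can use MI_STORE_DWORD_IMM with a virtual
 * (GTT) address: gen2, some gen3 parts (i915g/i915gm) and the gen6 video
 * engine cannot, see the per-gen notes below.
 */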
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */

	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));

	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */

	default:
		return true;
	}
}
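
/*
 * Returns a mask of I915_ENGINE_CLASS_* bits for the uabi engine classes
 * whose engines carry a saved default context image (engine->default_state),
 * i.e. those for which a pristine context register state is known.
 */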
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int which;

	which = 0;
	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}
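
/*
 * Emit a one-line summary of a request: global seqno ('!' appended if
 * completed), context hw_id:fence seqno, priority, age in milliseconds
 * since emission, and the timeline name.
 */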
static void print_request(struct drm_printer *m,
			  struct drm_i915_gem_request *rq,
			  const char *prefix)
{
	drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno,
		   i915_gem_request_completed(rq) ? "!" : "",
		   rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}
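
/*
 * Print a buffer as rows of eight dwords, eliding runs of identical rows
 * with a single '*' in the style of hexdump(1).
 */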
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "%08zx %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}
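
/**
 * intel_engine_dump - dump the current state of an engine for debugging
 * @engine: the engine to report on
 * @m: the drm_printer to emit the report to
 * @header: optional printf-style header line (plus varargs), may be NULL
 *
 * Print the seqno/hangcheck state, the first/last/active requests, the key
 * ring registers, the execlist CSB and ELSP ports (or the ppGTT registers
 * on legacy submission), the pending queues and waiters, and a hexdump of
 * the HWSP.
 */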
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *rq;
	struct rb_node *rb;
	char hdr[80];
	u64 addr;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (i915_terminally_wedged(&engine->i915->gpu_error))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
		   engine->timeline->inflight_seqnos);
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline->requests,
			      struct drm_i915_gem_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tfirst ");

	rq = list_last_entry(&engine->timeline->requests,
			     struct drm_i915_gem_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tlast ");

	rq = i915_gem_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");
		drm_printf(m,
			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
			   rq->head, rq->postfix, rq->tail,
			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
	}

	drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
		   I915_READ(RING_START(engine->mmio_base)),
		   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
	drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
		   rq ? rq->ring->head : 0);
	drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
		   rq ? rq->ring->tail : 0);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		drm_printf(m, "\tSYNC_0: 0x%08x\n",
			   I915_READ(RING_SYNC_0(engine->mmio_base)));
		drm_printf(m, "\tSYNC_1: 0x%08x\n",
			   I915_READ(RING_SYNC_1(engine->mmio_base)));
		if (HAS_VEBOX(dev_priv))
			drm_printf(m, "\tSYNC_2: 0x%08x\n",
				   I915_READ(RING_SYNC_2(engine->mmio_base)));
	}

	rcu_read_unlock();

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
					RING_DMA_FADD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
	else
		addr = I915_READ(DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   I915_READ(RING_IPEIR(engine->mmio_base)));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   I915_READ(RING_IPEHR(engine->mmio_base)));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
		u32 ptr, read, write;
		unsigned int idx;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		read = GEN8_CSB_READ_PTR(ptr);
		write = GEN8_CSB_WRITE_PTR(ptr);
		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
			   read, execlists->csb_head,
			   write,
			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
					  &engine->irq_posted)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
				   idx,
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
				   hws[idx * 2 + 1]);
		}

		rcu_read_lock();
		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				snprintf(hdr, sizeof(hdr),
					 "\t\tELSP[%d] count=%d, rq: ",
					 idx, count);
				print_request(m, rq, hdr);
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
		rcu_read_unlock();
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}

	spin_lock_irq(&engine->timeline->lock);
	list_for_each_entry(rq, &engine->timeline->requests, link)
		print_request(m, rq, "\t\tE ");
	for (rb = execlists->first; rb; rb = rb_next(rb)) {
		struct i915_priolist *p =
			rb_entry(rb, typeof(*p), node);

		list_for_each_entry(rq, &p->requests, priotree.link)
			print_request(m, rq, "\t\tQ ");
	}
	spin_unlock_irq(&engine->timeline->lock);

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);

	if (INTEL_GEN(dev_priv) >= 6)
		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));

	drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
		   engine->irq_posted,
		   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
				  &engine->irq_posted)),
		   yesno(test_bit(ENGINE_IRQ_EXECLIST,
				  &engine->irq_posted)));

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.page_addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
}
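
/* Translate the uABI engine class (I915_ENGINE_CLASS_*) into the hw class. */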
static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};
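
/*
 * Look up an engine by its uABI (class, instance) pair, returning NULL if
 * either the class or the instance is unknown.
 */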
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class >= ARRAY_SIZE(user_class_map))
		return NULL;

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}

/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	spin_lock_irqsave(&engine->stats.lock, flags);
	if (engine->stats.enabled == ~0)
		goto busy;
	if (engine->stats.enabled++ == 0)
		engine->stats.enabled_at = ktime_get();
	spin_unlock_irqrestore(&engine->stats.lock, flags);

	return 0;

busy:
	spin_unlock_irqrestore(&engine->stats.lock, flags);

	return -EBUSY;
}
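
/*
 * Accumulated busy time: stats.total plus, if the engine is currently
 * active, the time elapsed since stats.start. Both callers below hold
 * engine->stats.lock around this read.
 */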
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total;
	unsigned long flags;

	spin_lock_irqsave(&engine->stats.lock, flags);
	total = __intel_engine_get_busy_time(engine);
	spin_unlock_irqrestore(&engine->stats.lock, flags);

	return total;
}

/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	spin_unlock_irqrestore(&engine->stats.lock, flags);
}
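
/* Pull in the mock engine implementation used by the i915 selftests. */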
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif