intel_engine_cs.c

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);

	u8 uabi_class;
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

#define MAX_MMIO_BASES 3
struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS4] = {
		.hw_id = VCS4_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS2] = {
		.hw_id = VECS2_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}
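
/*
 * Walk the mmio_bases table, which is sorted from newest to oldest gen,
 * and return the first base whose minimum gen is satisfied by this device.
 */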
static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}
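
/*
 * Compose the engine name as "<class><instance>", e.g. "rcs0" or "vcs1",
 * warning if the result would not fit in the name buffer.
 */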
static void __sprint_engine_name(char *name, const struct engine_info *info)
{
	WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
			 intel_engine_classes[info->class].name,
			 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}
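
/*
 * Allocate an intel_engine_cs for the given engine id and fill in its
 * static state (name, mmio base, class/instance, context size) from the
 * tables above. No hardware access is performed here.
 */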
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	__sprint_engine_name(engine->name, info);
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_id = info->uabi_id;
	engine->uabi_class = intel_engine_classes[info->class].uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	seqlock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;
	return 0;
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver, by warning about and disabling the
	 * forgotten engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	i915_check_and_clear_faults(dev_priv);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
	i915_gem_batch_pool_init(&engine->batch_pool, engine);
}

static bool csb_force_mmio(struct drm_i915_private *i915)
{
	/* Older GVT emulation depends upon intercepting CSB mmio */
	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
		return true;

	return false;
}

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->csb_use_mmio = csb_force_mmio(engine->i915);

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue_priority = INT_MIN;
	execlists->queue = RB_ROOT;
	execlists->first = NULL;
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	i915_timeline_init(engine->i915, &engine->timeline, engine->name);

	intel_engine_init_execlist(engine);
	intel_engine_init_hangcheck(engine);
	intel_engine_init_batch_pool(engine);
	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine,
				unsigned int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

static void __intel_context_unpin(struct i915_gem_context *ctx,
				  struct intel_engine_cs *engine)
{
	intel_context_unpin(to_intel_context(ctx, engine));
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ce = intel_context_pin(i915->kernel_context, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (i915->preempt_context) {
		ce = intel_context_pin(i915->preempt_context, engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	if (HWS_NEEDS_PHYSICAL(i915))
		ret = init_phys_status_page(engine);
	else
		ret = init_status_page(engine);
	if (ret)
		goto err_breadcrumbs;

	return 0;

err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (i915->preempt_context)
		__intel_context_unpin(i915->preempt_context, engine);
err_unpin_kernel:
	__intel_context_unpin(i915->kernel_context, engine);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_cleanup_scratch(engine);

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (i915->preempt_context)
		__intel_context_unpin(i915->preempt_context, engine);
	__intel_context_unpin(i915->kernel_context, engine);

	i915_timeline_fini(&engine->timeline);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}
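
/*
 * Ask the command streamer to stop fetching commands (via the STOP_RING
 * bit in RING_MI_MODE) and wait for it to report idle; returns -ETIMEDOUT
 * if the ring does not settle within 1ms.
 */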
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);
	int err;

	if (INTEL_GEN(dev_priv) < 3)
		return -ENODEV;

	GEM_TRACE("%s\n", engine->name);

	I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));

	err = 0;
	if (__intel_wait_for_register_fw(dev_priv,
					 mode, MODE_IDLE, MODE_IDLE,
					 1000, 0,
					 NULL)) {
		GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
		err = -ETIMEDOUT;
	}

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	POSTING_READ_FW(mode);

	return err;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
	const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
	u32 mcr_s_ss_select;
	u32 slice = fls(sseu->slice_mask);
	u32 subslice = fls(sseu->subslice_mask[slice]);

	if (INTEL_GEN(dev_priv) == 10)
		mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
				  GEN8_MCR_SUBSLICE(subslice);
	else if (INTEL_GEN(dev_priv) >= 11)
		mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
				  GEN11_MCR_SUBSLICE(subslice);
	else
		mcr_s_ss_select = 0;

	return mcr_s_ss_select;
}
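
/*
 * Read a per-slice/subslice register: steer the MCR selector to the
 * requested slice/subslice under forcewake and the uncore lock, read the
 * register, then restore the default steering before unlocking.
 */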
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr_slice_subslice_mask;
	uint32_t mcr_slice_subslice_select;
	uint32_t default_mcr_s_ss_select;
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(dev_priv) >= 11) {
		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
					  GEN11_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
					    GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
					  GEN8_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
					    GEN8_MCR_SUBSLICE(subslice);
	}

	default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);

	WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
		     default_mcr_s_ss_select);

	mcr &= ~mcr_slice_subslice_mask;
	mcr |= mcr_slice_subslice_select;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~mcr_slice_subslice_mask;
	mcr |= default_mcr_s_ss_select;

	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}
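
/*
 * The ring is idle when no commands remain between HEAD and TAIL and
 * (on gen3+) the CS parser reports MODE_IDLE; if the whole device is
 * runtime suspended, the engine is trivially idle.
 */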
static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	/* If the whole device is asleep, the engine must be idle */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return true;

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active)) {
		struct intel_engine_execlists *execlists = &engine->execlists;

		if (tasklet_trylock(&execlists->tasklet)) {
			execlists->tasklet.func(execlists->tasklet.data);
			tasklet_unlock(&execlists->tasklet);
		}

		if (READ_ONCE(execlists->active))
			return false;
	}

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (READ_ONCE(engine->execlists.first))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}

bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context to be executed on this engine, or the
 * last context executed before the engine went idle, is the kernel context
 * (#i915.kernel_context).
 */
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	const struct intel_context *kernel_context =
		to_intel_context(engine->i915->kernel_context, engine);
	struct i915_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_gem_active_peek(&engine->timeline.last_request);
	if (rq)
		return rq->hw_context == kernel_context;
	else
		return engine->last_retired_context == kernel_context;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

/**
 * intel_engines_sanitize: called after the GPU has lost power
 * @i915: the i915 device
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_engines_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_engines_sanitize(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("\n");

	for_each_engine(engine, i915, id) {
		if (engine->reset.reset)
			engine->reset.reset(engine, NULL);
	}
}

/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p, NULL);
		}

		/* Must be reset upon idling, or we may miss the busy wakeup. */
		GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);

		if (engine->park)
			engine->park(engine);

		if (engine->pinned_default_state) {
			i915_gem_object_unpin_map(engine->default_state);
			engine->pinned_default_state = NULL;
		}

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}

/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
 */
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		void *map;

		/* Pin the default state for fast resets from atomic context. */
		map = NULL;
		if (engine->default_state)
			map = i915_gem_object_pin_map(engine->default_state,
						      I915_MAP_WB);
		if (!IS_ERR_OR_NULL(map))
			engine->pinned_default_state = map;

		if (engine->unpark)
			engine->unpark(engine);

		intel_engine_init_hangcheck(engine);
	}
}

/**
 * intel_engine_lost_context: called when the GPU is reset into unknown state
 * @engine: the engine
 *
 * We have either reset the GPU or are otherwise about to lose state tracking
 * of the current GPU logical state (e.g. suspend). On next use, it is
 * therefore imperative that we make no presumptions about the current state
 * and load from scratch.
 */
void intel_engine_lost_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	ce = fetch_and_zero(&engine->last_retired_context);
	if (ce)
		intel_context_unpin(ce);
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}
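
/*
 * Return a bitmask of uabi engine classes for which a default (golden)
 * context state has been captured, i.e. classes with context isolation.
 */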
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int which;

	which = 0;
	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}

static int print_sched_attr(struct drm_i915_private *i915,
			    const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);

	return x;
}

static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
	char buf[80] = "";
	int x = 0;

	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
		   prefix,
		   rq->global_seqno,
		   i915_request_completed(rq) ? "!" : "",
		   rq->fence.context, rq->fence.seqno,
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}
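
/*
 * Dump a buffer as rows of hex words, collapsing runs of identical rows
 * into a single "*" line, in the style of print_hex_dump().
 */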
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static void intel_engine_print_registers(const struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const struct intel_engine_execlists * const execlists =
		&engine->execlists;
	u64 addr;

	if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   I915_READ(RING_START(engine->mmio_base)));
	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
	}

	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		drm_printf(m, "\tSYNC_0: 0x%08x\n",
			   I915_READ(RING_SYNC_0(engine->mmio_base)));
		drm_printf(m, "\tSYNC_1: 0x%08x\n",
			   I915_READ(RING_SYNC_1(engine->mmio_base)));
		if (HAS_VEBOX(dev_priv))
			drm_printf(m, "\tSYNC_2: 0x%08x\n",
				   I915_READ(RING_SYNC_2(engine->mmio_base)));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
					RING_DMA_FADD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
	else
		addr = I915_READ(DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   I915_READ(RING_IPEIR(engine->mmio_base)));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   I915_READ(RING_IPEHR(engine->mmio_base)));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
		u32 ptr, read, write;
		unsigned int idx;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		read = GEN8_CSB_READ_PTR(ptr);
		write = GEN8_CSB_WRITE_PTR(ptr);
		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n",
			   read, execlists->csb_head,
			   write,
			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
					  &engine->irq_posted)),
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
				   idx,
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
				   hws[idx * 2 + 1]);
		}

		rcu_read_lock();
		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			struct i915_request *rq;
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				char hdr[80];

				snprintf(hdr, sizeof(hdr),
					 "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ",
					 idx, count,
					 i915_ggtt_offset(rq->ring->vma));
				print_request(m, rq, hdr);
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
		rcu_read_unlock();
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
}
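
/*
 * Snapshot the ring contents spanned by the request (handling wrap-around
 * at the end of the ring buffer) and print them as a hexdump.
 */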
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
	void *ring;
	int size;

	drm_printf(m,
		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
		   rq->head, rq->postfix, rq->tail,
		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);

	size = rq->tail - rq->head;
	if (rq->tail < rq->head)
		size += rq->ring->size;

	ring = kmalloc(size, GFP_ATOMIC);
	if (ring) {
		const void *vaddr = rq->ring->vaddr;
		unsigned int head = rq->head;
		unsigned int len = 0;

		if (rq->tail < head) {
			len = rq->ring->size - head;
			memcpy(ring, vaddr + head, len);
			head = 0;
		}
		memcpy(ring + len, vaddr + head, size - len);

		hexdump(m, ring, size);
		kfree(ring);
	}
}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	const int MAX_REQUESTS_TO_SHOW = 8;
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq, *last;
	unsigned long flags;
	struct rb_node *rb;
	int count;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (i915_terminally_wedged(&engine->i915->gpu_error))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline.requests,
			      struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tfirst ");

	rq = list_last_entry(&engine->timeline.requests,
			     struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tlast ");

	rq = i915_gem_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");

		drm_printf(m, "\t\tring->start: 0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head: 0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail: 0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit: 0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space: 0x%08x\n",
			   rq->ring->space);

		print_request_ring(m, rq);
	}

	rcu_read_unlock();

	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->i915);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	local_irq_save(flags);
	spin_lock(&engine->timeline.lock);

	last = NULL;
	count = 0;
	list_for_each_entry(rq, &engine->timeline.requests, link) {
		if (count++ < MAX_REQUESTS_TO_SHOW - 1)
			print_request(m, rq, "\t\tE ");
		else
			last = rq;
	}
	if (last) {
		if (count > MAX_REQUESTS_TO_SHOW) {
			drm_printf(m,
				   "\t\t...skipping %d executing requests...\n",
				   count - MAX_REQUESTS_TO_SHOW);
		}
		print_request(m, last, "\t\tE ");
	}

	last = NULL;
	count = 0;
	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
	for (rb = execlists->first; rb; rb = rb_next(rb)) {
		struct i915_priolist *p =
			rb_entry(rb, typeof(*p), node);

		list_for_each_entry(rq, &p->requests, sched.link) {
			if (count++ < MAX_REQUESTS_TO_SHOW - 1)
				print_request(m, rq, "\t\tQ ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > MAX_REQUESTS_TO_SHOW) {
			drm_printf(m,
				   "\t\t...skipping %d queued requests...\n",
				   count - MAX_REQUESTS_TO_SHOW);
		}
		print_request(m, last, "\t\tQ ");
	}

	spin_unlock(&engine->timeline.lock);

	spin_lock(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock(&b->rb_lock);
	local_irq_restore(flags);

	drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
		   engine->irq_posted,
		   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
				  &engine->irq_posted)),
		   yesno(test_bit(ENGINE_IRQ_EXECLIST,
				  &engine->irq_posted)));

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.page_addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
}

static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};
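
/*
 * Translate a uabi (class, instance) pair supplied by userspace into the
 * internal engine, returning NULL if no such engine exists.
 */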
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class >= ARRAY_SIZE(user_class_map))
		return NULL;

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}

/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	tasklet_disable(&execlists->tasklet);
	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		const struct execlist_port *port = execlists->port;
		unsigned int num_ports = execlists_num_ports(execlists);

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		while (num_ports-- && port_isset(port)) {
			engine->stats.active++;
			port++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
	tasklet_enable(&execlists->tasklet);

	return err;
}

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&engine->stats.lock);
		total = __intel_engine_get_busy_time(engine);
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}

/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
#endif