intel_ringbuffer.h

#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_pmu.h"
#include "i915_selftest.h"

struct drm_printer;

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */

enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
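
/*
 * Illustrative sketch (not part of this header): walking the per-slice
 * sampler INSTDONE values with the iterator above. Assumes @instdone has
 * already been filled in by intel_engine_get_instdone() and that pr_info()
 * is an acceptable sink for the values:
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_info("sampler[%d][%d] = 0x%08x\n",
 *			slice, subslice,
 *			instdone.sampler[slice][subslice]);
 */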
struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds so all of these
 * values are referred to in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;

/*
 * Engine ID definitions.
 * Keep instances of the same type of engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0].
	 * This is called a Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];
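
	/*
	 * Illustrative sketch (not part of this header): the execlists
	 * backend packs a request pointer together with the number of
	 * times it was submitted into @request_count, and later unpacks
	 * both halves (@rq is a placeholder for a pinned request):
	 *
	 *	unsigned int count;
	 *
	 *	port_set(port, port_pack(rq, 1));
	 *	...
	 *	rq = port_unpack(port, &count);
	 */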
	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;

	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;

	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;

	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;

	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];
	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 uabi_id;
	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;
	unsigned int irq_shift;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct drm_i915_gem_object *default_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;

		bool irq_armed : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	struct {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0, etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to the bit number from @enable.
		 */
		unsigned int enable_count[I915_PMU_SAMPLE_BITS];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 */
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
		/**
		 * @busy_stats: Whether enablement of engine stats tracking
		 * has been requested.
		 */
		bool busy_stats;
		/**
		 * @disable_busy_stats: Work item for busy stats disabling.
		 *
		 * Same as with the enabling action, with the difference
		 * that we delay it in case there are rapid enable-disable
		 * actions, which can happen during tool startup (like perf
		 * stat).
		 */
		struct delayed_work disable_busy_stats;
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      -------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  i.e. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      -------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  i.e. transpose of f(x, y)
	 */
	struct {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
		struct {
			/* our mbox written by others */
			u32 wait[GEN6_NUM_SEMAPHORES];
			/* mboxes this ring signals to */
			i915_reg_t signal[GEN6_NUM_SEMAPHORES];
		} mbox;

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;
	struct i915_hw_ppgtt *legacy_active_ppgtt;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		spinlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, busy as active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where the engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};

static inline bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}
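
/*
 * Illustrative sketch (placeholder control flow, not lifted verbatim
 * from the driver): bracketing user submission with the
 * EXECLISTS_ACTIVE_* bits via the helpers above:
 *
 *	execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
 *	... submit ports and process context-status events ...
 *	if (!port_isset(execlists->port))
 *		execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
 */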
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));
}

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so only when we are uncertain of the device state, we
	 * take a bit of extra paranoia to try and ensure that the HWS
	 * takes the value we give and that it doesn't end up trapped
	 * inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_PREEMPT_INDEX	0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}
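
/*
 * Illustrative sketch (not part of this header) of the begin/advance
 * contract: reserve space, emit exactly that many dwords, then close
 * the packet. The opcodes and error handling are placeholders:
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(req, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(req, cs);
 */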
static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;

	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW, i.e.
 * 84 dwords or 336 bytes).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
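
/*
 * Illustrative sketch of a waiter using the API above (heavily
 * simplified; the real wait loop lives in i915_wait_request() and also
 * handles task state, signals, timeouts and the bottom-half handover):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, rq);
 *	intel_engine_add_wait(engine, &wait);
 *	while (!i915_gem_request_completed(rq))
 *		schedule();
 *	intel_engine_remove_wait(engine, &wait);
 */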
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a: for post sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}
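
/*
 * Illustrative sketch (not part of this header): emitting a seqno
 * write to the hardware status page with the helper above, as a
 * breadcrumb emitter might (@cs and @req are placeholders):
 *
 *	cs = gen8_emit_ggtt_write(cs, req->global_seqno,
 *				  intel_hws_seqno_address(req->engine));
 */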
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);

static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			/*
			 * Decrement the active context count and in case GPU
			 * is now idle add up to the running total.
			 */
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			/*
			 * After turning on engine stats, context out might be
			 * the first event in which case we account from the
			 * time stats gathering was turned on.
			 */
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

#endif /* _INTEL_RINGBUFFER_H_ */