intel_ringbuffer.h

#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication of the origin of the magic values used in the
 * various workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
        struct i915_vma *vma;
        u32 *page_addr;
        u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
        (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
        (dev_priv->semaphore->node.start + \
         GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
        (dev_priv->semaphore->node.start + \
         GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
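
/*
 * Worked example of the offset arithmetic above (assuming
 * I915_NUM_ENGINES == 5 and the engine ordering used by the signal/wait
 * table documented alongside struct intel_engine_cs below, i.e. RCS=0,
 * VCS=1, BCS=2, VECS=3, VCS2=4): the qword through which RCS signals VCS
 * sits at GEN8_SEMAPHORE_OFFSET(0, 1) = (0 * 5 + 1) * 8 = 0x08, while the
 * qword through which VCS signals RCS sits at (1 * 5 + 0) * 8 = 0x28,
 * matching the 0x08/0x28 entries in that table.
 */
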
enum intel_engine_hangcheck_action {
        ENGINE_IDLE = 0,
        ENGINE_WAIT,
        ENGINE_ACTIVE_SEQNO,
        ENGINE_ACTIVE_HEAD,
        ENGINE_ACTIVE_SUBUNITS,
        ENGINE_WAIT_KICK,
        ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
        switch (a) {
        case ENGINE_IDLE:
                return "idle";
        case ENGINE_WAIT:
                return "wait";
        case ENGINE_ACTIVE_SEQNO:
                return "active seqno";
        case ENGINE_ACTIVE_HEAD:
                return "active head";
        case ENGINE_ACTIVE_SUBUNITS:
                return "active subunits";
        case ENGINE_WAIT_KICK:
                return "wait kick";
        case ENGINE_DEAD:
                return "dead";
        }

        return "unknown";
}

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
        (INTEL_GEN(dev_priv__) == 7 ? \
         1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
        (INTEL_GEN(dev_priv__) == 7 ? \
         1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
        for ((slice__) = 0, (subslice__) = 0; \
             (slice__) < I915_MAX_SLICES; \
             (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
               (slice__) += ((subslice__) == 0)) \
                for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
                            (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
        u32 instdone;
        /* The following exist only in the RCS engine */
        u32 slice_common;
        u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
        u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
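
/*
 * A minimal usage sketch for the iterator above (illustrative only: the
 * helper name and the DRM_DEBUG reporting are ours, not part of the
 * driver). It walks every slice/subslice pair present on this device and
 * reports the per-unit sampler instdone collected in the struct.
 */
static inline void
i915_example_dump_sampler_instdone(struct drm_i915_private *dev_priv,
                                   const struct intel_instdone *instdone)
{
        int slice, subslice;

        /* Skips slice/subslice pairs masked off in the device's sseu info. */
        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                DRM_DEBUG("sampler[%d][%d] = 0x%08x\n",
                          slice, subslice,
                          instdone->sampler[slice][subslice]);
}
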
struct intel_engine_hangcheck {
        u64 acthd;
        u32 seqno;
        enum intel_engine_hangcheck_action action;
        unsigned long action_timestamp;
        int deadlock;
        struct intel_instdone instdone;
        bool stalled;
};

struct intel_ring {
        struct i915_vma *vma;
        void *vaddr;

        struct intel_engine_cs *engine;

        struct list_head request_list;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;

        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: the batch's starting position; also useful should we want
 *    to place multiple batches at different offsets based on some
 *    criteria. Not a requirement at the moment, but it provides an
 *    option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
        struct i915_wa_ctx_bb {
                u32 offset;
                u32 size;
        } indirect_ctx, per_ctx;
        struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

struct intel_engine_cs {
        struct drm_i915_private *i915;
        const char *name;
        enum intel_engine_id {
                RCS = 0,
                BCS,
                VCS,
                VCS2, /* Keep instances of the same type engine together. */
                VECS
        } id;
#define _VCS(n) (VCS + (n))
        unsigned int exec_id;
        enum intel_engine_hw_id {
                RCS_HW = 0,
                VCS_HW,
                BCS_HW,
                VECS_HW,
                VCS2_HW
        } hw_id;
        enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
        u32 mmio_base;
        unsigned int irq_shift;
        struct intel_ring *buffer;
        struct intel_timeline *timeline;

        struct intel_render_state *render_state;

        atomic_t irq_count;
        unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

        /* Rather than have every client wait upon all user interrupts,
         * with the herd waking after every interrupt and each doing the
         * heavyweight seqno dance, we delegate the task (of being the
         * bottom-half of the user interrupt) to the first client. After
         * every interrupt, we wake up one client, who does the heavyweight
         * coherent seqno read and either goes back to sleep (if incomplete),
         * or wakes up all the completed clients in parallel, before then
         * transferring the bottom-half status to the next client in the queue.
         *
         * Compared to walking the entire list of waiters in a single dedicated
         * bottom-half, we reduce the latency of the first waiter by avoiding
         * a context switch, but incur additional coherent seqno reads when
         * following the chain of request breadcrumbs. Since it is most likely
         * that we have a single client waiting on each seqno, then reducing
         * the overhead of waking that client is much preferred.
         */
        struct intel_breadcrumbs {
                spinlock_t irq_lock; /* protects irq_*; irqsafe */
                struct intel_wait *irq_wait; /* oldest waiter by retirement */

                spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
                struct rb_root waiters; /* sorted by retirement, priority */
                struct rb_root signals; /* sorted by retirement */
                struct task_struct *signaler; /* used for fence signalling */
                struct drm_i915_gem_request __rcu *first_signal;
                struct timer_list fake_irq; /* used after a missed interrupt */
                struct timer_list hangcheck; /* detect missed interrupts */

                unsigned int hangcheck_interrupts;

                bool irq_armed : 1;
                bool irq_enabled : 1;
                I915_SELFTEST_DECLARE(bool mock : 1);
        } breadcrumbs;

        /*
         * A pool of objects to use as shadow copies of client batch buffers
         * when the command parser is enabled. Prevents the client from
         * modifying the batch contents after software parsing.
         */
        struct i915_gem_batch_pool batch_pool;

        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;
        struct i915_vma *scratch;

        u32 irq_keep_mask; /* always keep these interrupts */
        u32 irq_enable_mask; /* bitmask to enable ring interrupt */
        void (*irq_enable)(struct intel_engine_cs *engine);
        void (*irq_disable)(struct intel_engine_cs *engine);

        int (*init_hw)(struct intel_engine_cs *engine);
        void (*reset_hw)(struct intel_engine_cs *engine,
                         struct drm_i915_gem_request *req);

        int (*context_pin)(struct intel_engine_cs *engine,
                           struct i915_gem_context *ctx);
        void (*context_unpin)(struct intel_engine_cs *engine,
                              struct i915_gem_context *ctx);
        int (*request_alloc)(struct drm_i915_gem_request *req);
        int (*init_context)(struct drm_i915_gem_request *req);

        int (*emit_flush)(struct drm_i915_gem_request *request,
                          u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH      BIT(1)
#define EMIT_BARRIER    (EMIT_INVALIDATE | EMIT_FLUSH)
        int (*emit_bb_start)(struct drm_i915_gem_request *req,
                             u64 offset, u32 length,
                             unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
        void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
                                u32 *cs);
        int emit_breadcrumb_sz;

        /* Pass the request to the hardware queue (e.g. directly into
         * the legacy ringbuffer or to the end of an execlist).
         *
         * This is called from an atomic context with irqs disabled; must
         * be irq safe.
         */
        void (*submit_request)(struct drm_i915_gem_request *req);

        /* Call when the priority on a request has changed and it and its
         * dependencies may need rescheduling. Note the request itself may
         * not be ready to run!
         *
         * Called under the struct_mutex.
         */
        void (*schedule)(struct drm_i915_gem_request *request,
                         int priority);

        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
        void (*cleanup)(struct intel_engine_cs *engine);

        /* GEN8 signal/wait table - never trust comments!
         *
         *        signal to    signal to    signal to    signal to     signal to
         *          RCS           VCS          BCS          VECS          VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
         *  ie. transpose of g(x, y)
         *
         *        sync from    sync from    sync from    sync from     sync from
         *          RCS           VCS          BCS          VECS          VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
         *  ie. transpose of f(x, y)
         */
        struct {
                union {
#define GEN6_SEMAPHORE_LAST  VECS_HW
#define GEN6_NUM_SEMAPHORES  (GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
                        struct {
                                /* our mbox written by others */
                                u32 wait[GEN6_NUM_SEMAPHORES];
                                /* mboxes this ring signals to */
                                i915_reg_t signal[GEN6_NUM_SEMAPHORES];
                        } mbox;
                        u64 signal_ggtt[I915_NUM_ENGINES];
                };

                /* AKA wait() */
                int (*sync_to)(struct drm_i915_gem_request *req,
                               struct drm_i915_gem_request *signal);
                u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
        } semaphore;

        /* Execlists */
        struct tasklet_struct irq_tasklet;
        struct execlist_port {
                struct drm_i915_gem_request *request;
                unsigned int count;
                GEM_DEBUG_DECL(u32 context_id);
        } execlist_port[2];
        struct rb_root execlist_queue;
        struct rb_node *execlist_first;
        unsigned int fw_domains;

        /* Contexts are pinned whilst they are active on the GPU. The last
         * context executed remains active whilst the GPU is idle - the
         * switch away and write to the context object only occurs on the
         * next execution. Contexts are only unpinned on retirement of the
         * following request ensuring that we can always write to the object
         * on the context switch even after idling. Across suspend, we switch
         * to the kernel context and trash it as the save may not happen
         * before the hardware is powered down.
         */
        struct i915_gem_context *last_retired_context;

        /* We track the current MI_SET_CONTEXT in order to eliminate
         * redundant context switches. This presumes that requests are not
         * reordered! Or, when they are, the tracking is updated along with
         * the emission of individual requests into the legacy command
         * stream (ring).
         */
        struct i915_gem_context *legacy_active_context;

        struct intel_engine_hangcheck hangcheck;

        bool needs_cmd_parser;

        /*
         * Table of commands the command parser needs to know about
         * for this engine.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

        /*
         * Table of registers allowed in commands that read/write registers.
         */
        const struct drm_i915_reg_table *reg_tables;
        int reg_table_count;

        /*
         * Returns the bitmask for the length field of the specified command.
         * Return 0 for an unrecognized/invalid command.
         *
         * If the command parser finds an entry for a command in the engine's
         * cmd_tables, it gets the command's length based on the table entry.
         * If not, it calls this function to determine the per-engine length
         * field encoding for the command (i.e. different opcode ranges use
         * certain bits to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
        return 1 << engine->id;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
        mb();
        clflush(&engine->status_page.page_addr[reg]);
        mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
                        int reg, u32 value)
{
        engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX          0x30
#define I915_GEM_HWS_INDEX_ADDR     (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX  0x40
#define I915_GEM_HWS_SCRATCH_ADDR   (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
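
/*
 * Worked example, assuming MI_STORE_DWORD_INDEX_SHIFT == 2 (the index is
 * counted in dwords, the address in bytes): I915_GEM_HWS_INDEX_ADDR
 * resolves to 0x30 << 2 == 0xc0, i.e. the byte offset of dword 0x30
 * within the status page.
 */
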
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
        /* Dummy function.
         *
         * This serves as a placeholder in the code so that the reader
         * can compare against the preceding intel_ring_begin() and
         * check that the number of dwords emitted matches the space
         * reserved for the command packet (i.e. the value passed to
         * intel_ring_begin()).
         */
        GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
}
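
/*
 * A minimal sketch of the begin/emit/advance contract (the helper below
 * is illustrative, not part of the driver): reserve space, emit exactly
 * that many dwords, then let intel_ring_advance() sanity-check that the
 * cursor landed where the reservation said it would.
 */
static inline int i915_example_emit_noops(struct drm_i915_gem_request *req)
{
        u32 *cs;

        cs = intel_ring_begin(req, 4); /* reserve four dwords */
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_NOOP;
        *cs++ = MI_NOOP;
        *cs++ = MI_NOOP;
        *cs++ = MI_NOOP;

        intel_ring_advance(req, cs); /* must match the reservation */
        return 0;
}
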
static inline u32
intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
{
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
        u32 offset = addr - req->ring->vaddr;

        GEM_BUG_ON(offset > req->ring->size);
        return offset & (req->ring->size - 1);
}
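
/*
 * Worked example of the mask above: with a 4096-byte ring, an emission
 * that ends exactly at vaddr + 4096 yields offset 4096, and
 * 4096 & (4096 - 1) folds it back to 0; this is precisely how writing
 * ring->size itself is avoided, as the comment warns.
 */
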
void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
        return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
        /* We are only peeking at the tail of the submit queue (and not the
         * queue itself) in order to gain a hint as to the current active
         * state of the engine. Callers are not expected to be taking
         * engine->timeline->lock, nor are they expected to be concerned
         * with serialising this hint with anything, so document it as
         * a hint and nothing more.
         */
        return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW, i.e.
 * 84 dwords * 4 bytes = 336 bytes).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
        return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
                                   struct drm_i915_gem_request *rq)
{
        wait->tsk = current;
        wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
        wait->tsk = current;
        wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
        return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
        wait->seqno = seqno;
        return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
                          const struct drm_i915_gem_request *rq)
{
        return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
        return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
                         const struct drm_i915_gem_request *rq)
{
        return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
        return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait);
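
/*
 * A minimal sketch of a waiter's setup using the helpers above
 * (illustrative only: the function name and flow are ours, and all error
 * handling and the actual sleeping are elided). A wait is bound to a
 * request, refreshed with the request's current global seqno, and only
 * then queued on the engine.
 */
static inline bool
i915_example_begin_wait(struct intel_engine_cs *engine,
                        struct intel_wait *wait,
                        struct drm_i915_gem_request *rq)
{
        intel_wait_init(wait, rq);
        if (!intel_wait_update_request(wait, rq))
                return false; /* no global seqno assigned yet */

        /* Queue the wait; the return value hints whether we must arm the irq. */
        return intel_engine_add_wait(engine, wait);
}
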
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
        return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
        memset(batch, 0, 6 * sizeof(u32));

        batch[0] = GFX_OP_PIPE_CONTROL(6);
        batch[1] = flags;
        batch[2] = offset;

        return batch + 6;
}
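
/*
 * Because the helper zeroes all six dwords and returns the advanced
 * pointer, PIPE_CONTROLs chain naturally. A sketch (the wrapper and the
 * PIPE_CONTROL_CS_STALL flag choice are illustrative, not prescribed by
 * the driver):
 */
static inline u32 *
gen8_example_emit_double_stall(u32 *batch, u32 offset)
{
        batch = gen8_emit_pipe_control(batch, PIPE_CONTROL_CS_STALL, offset);
        return gen8_emit_pipe_control(batch, PIPE_CONTROL_CS_STALL, offset);
}
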
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

#endif /* _INTEL_RINGBUFFER_H_ */