#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
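
/*
 * A minimal sketch (not part of the original header) of how free space
 * can be accounted for under the constraint above: I915_RING_FREE_SPACE
 * bytes are always held in reserve so head and tail never meet on the
 * same cacheline. The function name is illustrative only; the driver's
 * real helper is __intel_ring_space(), declared near the end of this
 * file.
 */
static inline int example_ring_space(int head, int tail, int size)
{
        /* Bytes between tail and head, minus the reserved slack. */
        int space = head - (tail + I915_RING_FREE_SPACE);
        if (space < 0)
                space += size; /* head has wrapped around */
        return space;
}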
struct intel_hw_status_page {
        u32 *page_addr;
        unsigned int gfx_addr;
        struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)

#define GEN8_SIGNAL_OFFSET(__ring, to) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
        ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
        (i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
        ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
        (i915_semaphore_seqno_size * (__ring)->id))
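
/*
 * Worked example (illustrative, derived from the macros above): for VCS
 * (id 1) signalling to BCS (id 2), the slot offset within semaphore_obj
 * is (1 * 5 * 8) + (8 * 2) = 0x38, which matches the GEN8 signal table
 * documented in struct intel_engine_cs below.
 */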
#define GEN8_RING_SEMAPHORE_INIT do { \
        if (!dev_priv->semaphore_obj) { \
                break; \
        } \
        ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
        ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
        ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
        ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
        ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
        ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
        } while (0)
enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
        HANGCHECK_ACTIVE,
        HANGCHECK_ACTIVE_LOOP,
        HANGCHECK_KICK,
        HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
        u64 acthd;
        u64 max_acthd;
        u32 seqno;
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
};

struct intel_ringbuffer {
        struct drm_i915_gem_object *obj;
        void __iomem *virtual_start;

        struct intel_engine_cs *ring;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;

        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
};
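
/*
 * A minimal sketch (not from the original header) of how
 * last_retired_head is consumed when recomputing available space: if a
 * retirement has been recorded, head can jump forward to it before the
 * space calculation. This mirrors what intel_ring_update_space(),
 * declared later in this file, is expected to do; example_ring_space()
 * is the illustrative helper defined above.
 */
static inline void example_update_space(struct intel_ringbuffer *ringbuf)
{
        if (ringbuf->last_retired_head != (u32)-1) {
                /* The GPU finished everything up to this offset. */
                ringbuf->head = ringbuf->last_retired_head;
                ringbuf->last_retired_head = (u32)-1; /* mark as consumed */
        }
        ringbuf->space = example_ring_space(ringbuf->head, ringbuf->tail,
                                            ringbuf->size);
}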
struct intel_context;

struct intel_engine_cs {
        const char *name;
        enum intel_ring_id {
                RCS = 0x0,
                VCS,
                BCS,
                VECS,
                VCS2
        } id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
        u32 mmio_base;
        struct drm_device *dev;
        struct intel_ringbuffer *buffer;

        struct intel_hw_status_page status_page;

        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32 irq_enable_mask; /* bitmask to enable ring interrupt */
        struct drm_i915_gem_request *trace_irq_req;
        bool __must_check (*irq_get)(struct intel_engine_cs *ring);
        void (*irq_put)(struct intel_engine_cs *ring);

        int (*init_hw)(struct intel_engine_cs *ring);

        int (*init_context)(struct intel_engine_cs *ring,
                            struct intel_context *ctx);

        void (*write_tail)(struct intel_engine_cs *ring,
                           u32 value);
        int __must_check (*flush)(struct intel_engine_cs *ring,
                                  u32 invalidate_domains,
                                  u32 flush_domains);
        int (*add_request)(struct intel_engine_cs *ring);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        u32 (*get_seqno)(struct intel_engine_cs *ring,
                         bool lazy_coherency);
        void (*set_seqno)(struct intel_engine_cs *ring,
                          u32 seqno);
        int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
                                   u64 offset, u32 length,
                                   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
        void (*cleanup)(struct intel_engine_cs *ring);
        /* GEN8 signal/wait table - never trust comments!
         *        signal to     signal to     signal to     signal to     signal to
         *          RCS           VCS           BCS           VECS          VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP  (0x00) | VCS  (0x08) | BCS  (0x10) | VECS (0x18) | VCS2 (0x20) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS  (0x28) | NOP  (0x30) | BCS  (0x38) | VECS (0x40) | VCS2 (0x48) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS  (0x50) | VCS  (0x58) | NOP  (0x60) | VECS (0x68) | VCS2 (0x70) |
         *      |-------------------------------------------------------------------
         * VECS | RCS  (0x78) | VCS  (0x80) | BCS  (0x88) | NOP  (0x90) | VCS2 (0x98) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS  (0xa0) | VCS  (0xa8) | BCS  (0xb0) | VECS (0xb8) | NOP  (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
         *  ie. transpose of g(x, y)
         *
         *        sync from     sync from     sync from     sync from     sync from
         *          RCS           VCS           BCS           VECS          VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP  (0x00) | VCS  (0x28) | BCS  (0x50) | VECS (0x78) | VCS2 (0xa0) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS  (0x08) | NOP  (0x30) | BCS  (0x58) | VECS (0x80) | VCS2 (0xa8) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS  (0x10) | VCS  (0x38) | NOP  (0x60) | VECS (0x88) | VCS2 (0xb0) |
         *      |-------------------------------------------------------------------
         * VECS | RCS  (0x18) | VCS  (0x40) | BCS  (0x68) | NOP  (0x90) | VCS2 (0xb8) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS  (0x20) | VCS  (0x48) | BCS  (0x70) | VECS (0x98) | NOP  (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
         *  ie. transpose of f(x, y)
         */
        struct {
                u32 sync_seqno[I915_NUM_RINGS-1];

                union {
                        struct {
                                /* our mbox written by others */
                                u32 wait[I915_NUM_RINGS];
                                /* mboxes this ring signals to */
                                u32 signal[I915_NUM_RINGS];
                        } mbox;
                        u64 signal_ggtt[I915_NUM_RINGS];
                };

                /* AKA wait() */
                int (*sync_to)(struct intel_engine_cs *ring,
                               struct intel_engine_cs *to,
                               u32 seqno);
                int (*signal)(struct intel_engine_cs *signaller,
                              /* num_dwords needed by caller */
                              unsigned int num_dwords);
        } semaphore;

        /* Execlists */
        spinlock_t execlist_lock;
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
        u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int (*emit_request)(struct intel_ringbuffer *ringbuf,
                            struct drm_i915_gem_request *request);
        int (*emit_flush)(struct intel_ringbuffer *ringbuf,
                          struct intel_context *ctx,
                          u32 invalidate_domains,
                          u32 flush_domains);
        int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
                             struct intel_context *ctx,
                             u64 offset, unsigned dispatch_flags);

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_read_req
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * Do we have some not yet emitted requests outstanding?
         */
        struct drm_i915_gem_request *outstanding_lazy_request;
        bool gpu_caches_dirty;

        wait_queue_head_t irq_queue;

        struct intel_context *default_context;
        struct intel_context *last_context;

        struct intel_ring_hangcheck hangcheck;

        struct {
                struct drm_i915_gem_object *obj;
                u32 gtt_offset;
                volatile u32 *cpu_page;
        } scratch;

        bool needs_cmd_parser;

        /*
         * Table of commands the command parser needs to know about
         * for this ring.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

        /*
         * Table of registers allowed in commands that read/write registers.
         */
        const u32 *reg_table;
        int reg_count;

        /*
         * Table of registers allowed in commands that read/write registers, but
         * only from the DRM master.
         */
        const u32 *master_reg_table;
        int master_reg_count;

        /*
         * Returns the bitmask for the length field of the specified command.
         * Return 0 for an unrecognized/invalid command.
         *
         * If the command parser finds an entry for a command in the ring's
         * cmd_tables, it gets the command's length based on the table entry.
         * If not, it calls this function to determine the per-ring length field
         * encoding for the command (i.e. certain opcode ranges use certain bits
         * to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
};
bool intel_ring_initialized(struct intel_engine_cs *ring);

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
        return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
                      struct intel_engine_cs *other)
{
        int idx;

        /*
         * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
         * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
         * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
         * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
         * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
         */
        idx = (other - ring) - 1;
        if (idx < 0)
                idx += I915_NUM_RINGS;

        return idx;
}
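
/*
 * Usage sketch (illustrative, not from the original header): the
 * returned index selects the per-pair slot in semaphore.sync_seqno, so
 * a caller can check whether a given seqno has already been waited upon
 * for that ring/other pairing before emitting a new semaphore wait.
 */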
static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
                       int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        barrier();
        return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
                        int reg, u32 value)
{
        ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
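
/*
 * Example (a sketch, assuming the driver-reserved dword layout above):
 * reading the last breadcrumb value the GPU stored into the
 * driver-owned area of the status page. The helper name is illustrative
 * only.
 */
static inline u32 example_read_hws_seqno(struct intel_engine_cs *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}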
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
                               struct intel_ringbuffer *ringbuf);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);

static inline void intel_ring_emit(struct intel_engine_cs *ring,
                                   u32 data)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;

        iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
        ringbuf->tail += 4;
}

static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;

        ringbuf->tail &= ringbuf->size - 1;
}
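
/*
 * Typical emission pattern (a sketch, not from the original header):
 * reserve space with intel_ring_begin(), write dwords with
 * intel_ring_emit(), then close with intel_ring_advance(). MI_NOOP
 * comes from i915_reg.h; the function name is illustrative only.
 */
static inline int example_emit_two_noops(struct intel_engine_cs *ring)
{
        int ret;

        /* Make sure the ring has room for two dwords. */
        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}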
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
        return ringbuf->tail;
}

static inline struct drm_i915_gem_request *
intel_ring_get_request(struct intel_engine_cs *ring)
{
        BUG_ON(ring->outstanding_lazy_request == NULL);
        return ring->outstanding_lazy_request;
}

#endif /* _INTEL_RINGBUFFER_H_ */