intel_lrc.c

/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */
/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 */
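/*
 * Roughly speaking, a Logical Ring Context bundles everything an engine
 * needs to run a context into one GEM object: the hardware status page,
 * the ring buffer (start, head, tail and control registers) and the rest
 * of the register state that is saved and restored on a context switch.
 * Submission then happens by pointing the ExecList Submission Port (ELSP)
 * at such a context instead of banging the legacy ring registers directly.
 * The register layout written out in populate_lr_context() below follows
 * this scheme; the actual ELSP submission path is still a TODO in this file.
 */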
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096

#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44
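/*
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev: DRM device.
 * @enable_execlists: value of the enable_execlists module parameter.
 *
 * Decide whether Execlists should actually be used: they are only enabled
 * when the caller asked for them (non-zero parameter) and the platform has
 * both Logical Ring Contexts and PPGTT support.
 *
 * Return: 1 if Execlists should be enabled, 0 otherwise.
 */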
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev))
		return 1;

	return 0;
}
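/*
 * intel_execlists_submission() - the Execlists counterpart of the legacy
 * execbuffer submission path. Still a stub at this point: the actual batch
 * buffer dispatch through a logical ring has yet to be filled in.
 */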
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	/* TODO */
	return 0;
}
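/*
 * intel_logical_ring_stop() - quiesce and stop a logical ring
 *
 * Waits for outstanding requests to complete and then asks the engine to
 * stop via the STOP_RING bit in the ring's MI_MODE register, the same
 * mechanism the legacy ringbuffer code uses (hence the TODO about whether
 * this is still the right thing to do with Execlists enabled).
 */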
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));

	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
		return;
	}

	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}
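/*
 * intel_logical_ring_advance_and_submit() - advance the tail and, eventually,
 * submit the context to the hardware.
 *
 * With Execlists this is where the ELSP write will go (workloads are handed
 * to the hardware as logical ring contexts rather than by poking a ring tail
 * register directly); that part is still a TODO below.
 */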
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
{
	intel_logical_ring_advance(ringbuf);

	if (intel_ring_stopped(ringbuf->ring))
		return;

	/* TODO: how to submit a context to the ELSP is not here yet */
}
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
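/*
 * logical_ring_wait_request() - make room in the ring by retiring requests
 *
 * Walks the engine's request list looking for the oldest request whose
 * retirement would free at least @bytes of ring space, waits for its seqno
 * and then retires completed requests. Returns -ENOSPC if no such request
 * exists, in which case the caller falls back to polling the ring head.
 */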
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
				     int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= bytes) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	/* TODO: make sure we update the right ringbuffer's last_retired_head
	 * when retiring requests */
	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = intel_ring_space(ringbuf);
	return 0;
}
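/*
 * logical_ring_wait_for_space() - fallback when no request can be retired
 *
 * First forces a submission (in case writes have been queued without one),
 * then polls the hardware ring head until enough space frees up, bailing out
 * on pending signals, a wedged GPU or a 60 second timeout.
 */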
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
				       int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = logical_ring_wait_request(ringbuf, bytes);
	if (ret != -ENOSPC)
		return ret;

	/* Force the context submission in case we have been skipping it */
	intel_logical_ring_advance_and_submit(ringbuf);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes) {
			ret = 0;
			break;
		}

		msleep(1);

		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);

	return ret;
}
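/*
 * logical_ring_wrap_buffer() - pad the remainder of the ring with MI_NOOPs
 * and wrap the tail back to the start, waiting for space first if the
 * region to be padded is still in use.
 */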
static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = logical_ring_wait_for_space(ringbuf, rem);

		if (ret)
			return ret;
	}

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = intel_ring_space(ringbuf);

	return 0;
}
static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
{
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = logical_ring_wrap_buffer(ringbuf);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = logical_ring_wait_for_space(ringbuf, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
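/*
 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept
 * some number of dwords.
 * @ringbuf: logical ringbuffer about to begin emitting commands.
 * @num_dwords: number of dwords the caller is going to emit.
 *
 * Checks that the GPU is not wedged, makes sure there is enough space in the
 * ring (wrapping and/or waiting as needed), preallocates the lazy request and
 * reserves the space so that subsequent intel_logical_ring_emit() calls
 * cannot fail.
 */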
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = logical_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ringbuf->space -= num_dwords * sizeof(uint32_t);
	return 0;
}
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_MODE_GEN7(ring),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(ring));
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

	return 0;
}
static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = gen8_init_common_ring(ring);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	ret = intel_init_pipe_control(ring);
	if (ret)
		return ret;

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}
static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
			   u32 invalidate_domains,
			   u32 unused)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW + 1;

	if (ring == &dev_priv->ring[VCS]) {
		if (invalidate_domains & I915_GEM_GPU_DOMAINS)
			cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
				MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	} else {
		if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
			cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	}

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				I915_GEM_HWS_SCRATCH_ADDR |
				MI_FLUSH_DW_USE_GTT);
	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
	intel_logical_ring_emit(ringbuf, 0); /* value */
	intel_logical_ring_advance(ringbuf);

	return 0;
}
static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
				  u32 invalidate_domains,
				  u32 flush_domains)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}

	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
	}

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
	intel_logical_ring_emit(ringbuf, flags);
	intel_logical_ring_emit(ringbuf, scratch_addr);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_advance(ringbuf);

	return 0;
}
static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	cmd = MI_STORE_DWORD_IMM_GEN8;
	cmd |= MI_GLOBAL_GTT;

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				(ring->status_page.gfx_addr +
				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance_and_submit(ringbuf);

	return 0;
}
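/*
 * intel_logical_ring_cleanup() - deallocate the engine-side Execlists
 * resources: stop the ring, drop the lazy request/seqno, run the optional
 * per-engine cleanup hook, tear down the command parser and unmap the
 * status page (which lives in the default context object).
 */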
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!intel_ring_initialized(ring))
		return;

	intel_logical_ring_stop(ring);
	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	i915_cmd_parser_fini_ring(ring);

	if (ring->status_page.obj) {
		kunmap(sg_page(ring->status_page.obj->pages->sgl));
		ring->status_page.obj = NULL;
	}
}
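/*
 * logical_ring_init() - common per-engine setup for Execlists mode
 *
 * Initializes the engine lists and waitqueue, creates the engine's default
 * logical ring context, and uses the first page of that context object as
 * the hardware status page before calling the engine's init hook.
 */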
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	int ret;
	struct intel_context *dctx = ring->default_context;
	struct drm_i915_gem_object *dctx_obj;

	/* Intentionally left blank. */
	ring->buffer = NULL;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	init_waitqueue_head(&ring->irq_queue);

	ret = intel_lr_context_deferred_create(dctx, ring);
	if (ret)
		return ret;

	/* The status page is offset 0 from the context object in LRCs. */
	dctx_obj = dctx->engine[ring->id].state;
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
	ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
	if (ring->status_page.page_addr == NULL)
		return -ENOMEM;
	ring->status_page.obj = dctx_obj;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		return ret;

	if (ring->init) {
		ret = ring->init(ring);
		if (ret)
			return ret;
	}

	return 0;
}
static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;

	ring->init = gen8_init_render_ring;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush_render;

	return logical_ring_init(dev, ring);
}
static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;

	return logical_ring_init(dev, ring);
}
static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;

	return logical_ring_init(dev, ring);
}
static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;

	return logical_ring_init(dev, ring);
}
static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;

	return logical_ring_init(dev, ring);
}
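/*
 * intel_logical_rings_init() - allocate, populate and init the Execlists
 * submission related structures for all the engines.
 * @dev: DRM device.
 *
 * The render engine is always present; the other engines are initialized
 * only when the hardware has them, and each failure unwinds the engines set
 * up so far.
 *
 * Return: non-zero if the initialization failed.
 */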
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}
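/*
 * populate_lr_context() - fill in the initial register state of a logical
 * ring context.
 *
 * Maps the second page of the context object and writes the
 * MI_LOAD_REGISTER_IMM headers plus the (register, value) pairs the hardware
 * expects there: context control, the ring buffer registers pointing at this
 * context's own ringbuffer, batch buffer state, the PDP entries of the
 * context's PPGTT and, for the render engine only, the power/clock state
 * register.
 */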
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
			_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/* TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	ctx_obj->dirty = 1;
	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}
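/*
 * intel_lr_context_free() - free the LRC specific bits of a context
 * @ctx: the LR context to free.
 *
 * Releases, for every engine that has one, the per-engine ringbuffer and
 * unpins/unreferences the backing context object.
 */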
void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}
static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	int ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}
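/*
 * intel_lr_context_deferred_create() - create the LRC specific bits of a
 * context.
 * @ctx: LR context to create.
 * @ring: engine to be used with the context.
 *
 * Allocates and pins the backing context object, allocates a dedicated
 * ringbuffer for this context/engine pair and fills in the initial register
 * state via populate_lr_context(). The "deferred" in the name reflects that
 * creation is delayed until the context is actually needed with a given
 * engine (note the early return when the per-engine state already exists).
 *
 * Return: non-zero on error.
 */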
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
	if (ctx->engine[ring->id].state)
		return 0;

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		ret = -ENOMEM;
		return ret;
	}

	ringbuf->ring = ring;
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/* TODO: For now we put this in the mappable region so that we can reuse
	 * the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				 ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}