intel_lrc.c

/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096

#define RING_ELSP(ring)			((ring)->mmio_base + 0x230)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base + 0x244)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44
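
/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev: DRM device.
 * @enable_execlists: value of the i915.enable_execlists module parameter.
 *
 * Decide whether Execlists should be enabled, based on the module parameter
 * and on the prerequisites checked below (Logical Ring Contexts, PPGTT
 * support and a non-negative i915.use_mmio_flip).
 *
 * Return: 1 if Execlists should be enabled, 0 otherwise.
 */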
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}

static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
				 struct list_head *vmas)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(ringbuf);
}
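
/**
 * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
 * @dev: DRM device.
 * @file: DRM file.
 * @ring: Engine Command Streamer to submit to.
 * @ctx: Context to employ for this submission.
 * @args: execbuffer call arguments.
 * @vmas: list of vmas.
 * @batch_obj: the batchbuffer to submit.
 * @exec_start: batchbuffer start virtual address pointer.
 * @flags: translated execbuffer call flags.
 *
 * Validates the execbuffer arguments, moves the objects to the GPU domain,
 * emits the batchbuffer start on the context's ringbuffer and retires the
 * commands, mirroring what the legacy execbuffer path does for ringbuffer
 * submission.
 *
 * Return: non-zero if the submission fails.
 */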
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
	int instp_mode;
	u32 instp_mask;
	int ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning of this bit on gen6 */
			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
		return -EINVAL;
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}

		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		DRM_DEBUG("sol reset is gen7 only\n");
		return -EINVAL;
	}

	ret = execlists_move_to_gpu(ringbuf, vmas);
	if (ret)
		return ret;

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_logical_ring_begin(ringbuf, 4);
		if (ret)
			return ret;

		intel_logical_ring_emit(ringbuf, MI_NOOP);
		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
		intel_logical_ring_emit(ringbuf, INSTPM);
		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
		intel_logical_ring_advance(ringbuf);

		dev_priv->relative_constants_mode = instp_mode;
	}

	ret = ring->emit_bb_start(ringbuf, exec_start, flags);
	if (ret)
		return ret;

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

	return 0;
}
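
/**
 * intel_logical_ring_stop() - stop the given engine
 * @ring: Engine Command Streamer to stop.
 *
 * Waits for the engine to go idle and then asks the hardware to stop the
 * ring (via the STOP_RING bit), so that no further work is executed while
 * we clean up.
 */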
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
		return;
	}
	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}
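
/**
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @ringbuf: Logical Ringbuffer to advance.
 *
 * Advances the software tail of the ringbuffer. Actual submission of the
 * context to the ELSP is not implemented yet (see the TODO below).
 */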
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
{
	intel_logical_ring_advance(ringbuf);

	if (intel_ring_stopped(ringbuf->ring))
		return;

	/* TODO: how to submit a context to the ELSP is not here yet */
}

static int logical_ring_alloc_seqno(struct intel_engine_cs *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
				     int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= bytes) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	/* TODO: make sure we update the right ringbuffer's last_retired_head
	 * when retiring requests */
	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = intel_ring_space(ringbuf);
	return 0;
}

static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
				       int bytes)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = logical_ring_wait_request(ringbuf, bytes);
	if (ret != -ENOSPC)
		return ret;

	/* Force the context submission in case we have been skipping it */
	intel_logical_ring_advance_and_submit(ringbuf);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= bytes) {
			ret = 0;
			break;
		}

		msleep(1);

		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);

	return ret;
}

static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = logical_ring_wait_for_space(ringbuf, rem);

		if (ret)
			return ret;
	}

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = intel_ring_space(ringbuf);

	return 0;
}

static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
{
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = logical_ring_wrap_buffer(ringbuf);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = logical_ring_wait_for_space(ringbuf, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
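
/**
 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
 * @ringbuf: Logical ringbuffer.
 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
 *
 * The ringbuffer might not be ready to accept the commands right away (maybe
 * it needs to be wrapped, or to wait a bit for the tail to be updated). This
 * function takes care of that and also preallocates a request (every workload
 * submission is still mediated through requests, as with legacy ringbuffer
 * submission).
 *
 * Return: non-zero if the ringbuffer is not ready to be written to.
 */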
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = logical_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ringbuf->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);

	I915_WRITE(RING_MODE_GEN7(ring),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(ring));
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

	return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = gen8_init_common_ring(ring);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	ret = intel_init_pipe_control(ring);
	if (ret)
		return ret;

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}

static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
			      u64 offset, unsigned flags)
{
	bool ppgtt = !(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
			   u32 invalidate_domains,
			   u32 unused)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW + 1;

	if (ring == &dev_priv->ring[VCS]) {
		if (invalidate_domains & I915_GEM_GPU_DOMAINS)
			cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
				MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	} else {
		if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
			cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
				MI_FLUSH_DW_OP_STOREDW;
	}

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				I915_GEM_HWS_SCRATCH_ADDR |
				MI_FLUSH_DW_USE_GTT);
	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
	intel_logical_ring_emit(ringbuf, 0); /* value */
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
				  u32 invalidate_domains,
				  u32 flush_domains)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}

	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
	}

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
	intel_logical_ring_emit(ringbuf, flags);
	intel_logical_ring_emit(ringbuf, scratch_addr);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	u32 cmd;
	int ret;

	ret = intel_logical_ring_begin(ringbuf, 6);
	if (ret)
		return ret;

	cmd = MI_STORE_DWORD_IMM_GEN8;
	cmd |= MI_GLOBAL_GTT;

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				(ring->status_page.gfx_addr +
				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance_and_submit(ringbuf);

	return 0;
}
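
/**
 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
 * @ring: Engine Command Streamer.
 *
 * Stops the engine, drops the outstanding lazy request/seqno and releases
 * the resources acquired in logical_ring_init (command parser, status page
 * mapping and any engine-specific state via ring->cleanup).
 */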
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!intel_ring_initialized(ring))
		return;

	intel_logical_ring_stop(ring);
	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	i915_cmd_parser_fini_ring(ring);

	if (ring->status_page.obj) {
		kunmap(sg_page(ring->status_page.obj->pages->sgl));
		ring->status_page.obj = NULL;
	}
}

static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	int ret;
	struct intel_context *dctx = ring->default_context;
	struct drm_i915_gem_object *dctx_obj;

	/* Intentionally left blank. */
	ring->buffer = NULL;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	init_waitqueue_head(&ring->irq_queue);

	ret = intel_lr_context_deferred_create(dctx, ring);
	if (ret)
		return ret;

	/* The status page is offset 0 from the context object in LRCs. */
	dctx_obj = dctx->engine[ring->id].state;
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
	ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
	if (ring->status_page.page_addr == NULL)
		return -ENOMEM;
	ring->status_page.obj = dctx_obj;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		return ret;

	if (ring->init) {
		ret = ring->init(ring);
		if (ret)
			return ret;
	}

	return 0;
}

static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
	if (HAS_L3_DPF(dev))
		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	ring->init = gen8_init_render_ring;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush_render;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
	ring->irq_keep_mask =
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	ring->init = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;
	ring->irq_put = gen8_logical_ring_put_irq;
	ring->emit_bb_start = gen8_emit_bb_start;

	return logical_ring_init(dev, ring);
}
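
/**
 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
 * @dev: DRM device.
 *
 * Initializes the engines for Execlists submission (the counterpart of the
 * legacy ringbuffer init path), but only for those engines that are actually
 * present in the hardware.
 *
 * Return: non-zero if the initialization failed.
 */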
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}

static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
			_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/* TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	ctx_obj->dirty = 1;
	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}
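
/**
 * intel_lr_context_free() - free the LRC specific bits of a context
 * @ctx: the LR context to free.
 *
 * Frees the per-engine backing objects and ringbuffers that were allocated
 * by intel_lr_context_deferred_create.
 */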
void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}

static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	int ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}
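
/**
 * intel_lr_context_deferred_create() - create the LRC specific bits of a context
 * @ctx: LR context to create.
 * @ring: engine to be used with the context.
 *
 * This function can be called more than once, with different engines, if we
 * plan to use the context with them. The backing object and the ringbuffer
 * are only allocated the first time the context is used with a given engine;
 * subsequent calls are a no-op.
 *
 * Return: non-zero on error.
 */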
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
	if (ctx->engine[ring->id].state)
		return 0;

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		ret = -ENOMEM;
		return ret;
	}

	ringbuf->ring = ring;
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/* TODO: For now we put this in the mappable region so that we can reuse
	 * the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				 ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}