intel_ringbuffer.c

/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};
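
/*
 * Free space between the tail and the head, wrap-adjusted, less a small
 * gap so that the tail never quite catches up with the head (a full ring
 * would otherwise be indistinguishable from an empty one).
 */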
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* lower dword */
	intel_ring_emit(ring, 0); /* upper dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
		       | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == obj->gtt_offset &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}
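
/*
 * Allocate, pin and map a single scratch page for PIPE_CONTROL qword
 * writes; its GTT offset and CPU mapping are kept in ring->private for
 * the render ring.
 */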
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}
static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		I915_WRITE(MI_MODE, mode);
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		I915_WRITE(INSTPM,
			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}
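
/*
 * Write the given seqno into another ring's semaphore mailbox register,
 * acting as the signal side of the gen6 semaphore scheme; the wait side
 * is gen6_ring_sync() below.
 */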
static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_next_request_seqno(ring);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

/**
 * gen6_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
	       struct intel_ring_buffer *signaller,
	       u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(signaller->semaphore_register[waiter->id] ==
		MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter,
			dw1 | signaller->semaphore_register[waiter->id]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}
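
/*
 * Emit a depth-stalled PIPE_CONTROL qword write to the given scratch
 * address; pc_render_add_request() uses a run of these to flush the
 * pending PIPE_NOTIFY writes out to memory before raising the interrupt.
 */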
#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
		 PIPE_CONTROL_DEPTH_STALL);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	u32 seqno = i915_gem_next_request_seqno(ring);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}
static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	u32 seqno = i915_gem_next_request_seqno(ring);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		intel_ring_get_active_head(ring);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (INTEL_INFO(dev)->gen >= 5)
			ironlake_enable_irq(dev_priv,
					    ring->irq_enable_mask);
		else
			i915_enable_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (INTEL_INFO(dev)->gen >= 5)
			ironlake_disable_irq(dev_priv,
					     ring->irq_enable_mask);
		else
			i915_disable_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}
static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_next_request_seqno(ring);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	/* It looks like we need to prevent the gt from suspending while waiting
	 * for a notify irq, otherwise irqs seem to get lost on at least the
	 * blt/bsd rings on ivb. */
	gen6_gt_force_wake_get(dev_priv);

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		ironlake_enable_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		ironlake_disable_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock(&ring->irq_lock);

	gen6_gt_force_wake_put(dev_priv);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}
static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}
int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
	ring->size = 32 * PAGE_SIZE;

	init_waitqueue_head(&ring->irq_queue);
	spin_lock_init(&ring->irq_lock);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}
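
/*
 * Pad the remainder of the ring with MI_NOOPs and wrap the tail back to
 * the start, waiting for enough of the ring to drain first if the unused
 * space at the end is not yet free.
 */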
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool was_interruptible;
	int ret;

	/* XXX As we have not yet audited all the paths to check that
	 * they are ready for ERESTARTSYS from intel_ring_begin, do not
	 * allow us to be interruptible by a signal.
	 */
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_wait_request(ring, seqno, true);

	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}
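
/*
 * Try to make room by retiring completed requests, and failing that by
 * waiting on the first outstanding request whose retirement would free
 * at least n bytes of the ring.
 */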
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	i915_gem_retire_requests_ring(ring);

	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		int space;

		if (request->tail == -1)
			continue;

		space = request->tail - (ring->tail + 8);
		if (space < 0)
			space += ring->size;
		if (space >= n) {
			seqno = request->seqno;
			break;
		}

		/* Consume this request in case we need more space than
		 * is available and so need to prevent a race between
		 * updating last_retired_head and direct reads of
		 * I915_RING_HEAD. It also provides a nice sanity check.
		 */
		request->tail = -1;
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = intel_ring_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	if (WARN_ON(ring->last_retired_head == -1))
		return -ENOSPC;

	ring->head = ring->last_retired_head;
	ring->last_retired_head = -1;
	ring->space = ring_space(ring);
	if (WARN_ON(ring->space < n))
		return -ENOSPC;

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	trace_i915_ring_wait_begin(ring);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		/* With GEM the hangcheck timer should kick us out of the loop,
		 * leaving it early runs the risk of corrupting GEM state (due
		 * to running on almost untested codepaths). But on resume
		 * timers don't work yet, so prevent a complete hang in that
		 * case by choosing an insanely large timeout. */
		end = jiffies + 60 * HZ;
	else
		end = jiffies + 3 * HZ;

	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}
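
/*
 * Reserve space for num_dwords command dwords, wrapping the ring and/or
 * waiting for older submissions to drain as needed; a successful return
 * must be followed by exactly that many intel_ring_emit() calls and then
 * an intel_ring_advance().
 */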
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}
/* Blitter support (SandyBridge+) */

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
		ring->signal_mbox[0] = GEN6_VRSYNC;
		ring->signal_mbox[1] = GEN6_BRSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
	} else {
		ring->add_request = render_ring_add_request;
		ring->flush = render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	ring->dispatch_execbuffer = render_ring_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
	} else {
		ring->add_request = render_ring_add_request;
		ring->flush = render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	ring->dispatch_execbuffer = render_ring_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	if (!I915_NEED_GFX_HWS(dev))
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->map.offset = start;
	ring->map.size = size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	ring->virtual_start = (void __force __iomem *)ring->map.handle;
	return 0;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
		ring->signal_mbox[0] = GEN6_RVSYNC;
		ring->signal_mbox[1] = GEN6_BVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->write_tail = ring_write_tail;
		ring->flush = bsd_ring_flush;
		ring->add_request = ring_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
		if (IS_GEN5(dev))
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
		else
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
		ring->dispatch_execbuffer = ring_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = blt_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
	ring->irq_get = gen6_ring_get_irq;
	ring->irq_put = gen6_ring_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[0] = GEN6_RBSYNC;
	ring->signal_mbox[1] = GEN6_VBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}