/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += ring->size;
	return space;
}
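
/*
 * Worked example of the accounting above, assuming a 32768-byte ring and
 * I915_RING_FREE_SPACE == 64: with head == 2048 and tail == 30720, the raw
 * difference 2048 - (30720 + 64) = -28736 is negative, so ring->size is
 * added back, reporting 4032 bytes free. The free-space slop keeps tail
 * from ever catching up to head, since head == tail means "empty" to the
 * hardware.
 */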

void __intel_ring_advance(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	ring->tail &= ring->size - 1;
	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
		return;
	ring->write_tail(ring, ring->tail);
}

static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
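
/*
 * Example of how the bits above compose (illustrative only): on GEN5, a
 * call with invalidate_domains == (I915_GEM_DOMAIN_INSTRUCTION |
 * I915_GEM_DOMAIN_COMMAND) and flush_domains == 0 leaves
 * MI_NO_WRITE_FLUSH set (no render-cache write flush) and emits
 * MI_FLUSH | MI_NO_WRITE_FLUSH | MI_EXE_FLUSH | MI_INVALIDATE_ISP.
 */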

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}
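
/*
 * Illustrative dword layout of the PIPE_CONTROL emitted above for a pure
 * write flush (flush_domains != 0, invalidate_domains == 0):
 *
 *	GFX_OP_PIPE_CONTROL(4)
 *	PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
 *		PIPE_CONTROL_DEPTH_CACHE_FLUSH | PIPE_CONTROL_CS_STALL
 *	scratch_addr | PIPE_CONTROL_GLOBAL_GTT
 *	0
 */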

static int
gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
{
	int ret;

	if (!ring->fbc_dirty)
		return 0;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;
	/* WaFbcNukeOn3DBlt:ivb/hsw */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, value);
	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	intel_ring_advance(ring);

	ring->fbc_dirty = false;
	return 0;
}
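
/*
 * Note on the LRI/SRM pair above: the MI_LOAD_REGISTER_IMM writes
 * MSG_FBC_REND_STATE from the ring, and the following
 * MI_STORE_REGISTER_MEM reads the same register back into the scratch
 * page. The readback presumably acts as a CS-side posting read, ensuring
 * the register write has landed before later commands depend on it.
 */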

static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(ring);
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	if (!invalidate_domains && flush_domains)
		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

	return 0;
}

static int
gen8_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}
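
/*
 * Unlike the gen6/gen7 paths, gen8 uses the 6-dword PIPE_CONTROL form:
 * flags, an address split across two dwords (the high dword is zero here,
 * presumably because the scratch page sits in the low 4 GiB of the GGTT),
 * and a qword of immediate data, also zero.
 */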

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
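
/*
 * The shift-and-mask above folds bits 35:32 of the status page's bus
 * address into bits 7:4 of the HWS_PGA value, which is presumably where
 * gen4+ hardware expects the high bits of a >4 GiB DMA address; earlier
 * parts take only the low 32 bits.
 */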

static int init_ring_common(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	int ret = 0;
	u32 head;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(ring);
	else
		ring_setup_phys_status_page(ring);

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		ret = -EIO;
		goto out;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
		ring->last_retired_head = -1;
	}

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	int ret;

	if (ring->scratch.obj)
		return 0;

	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
	if (ret)
		goto err_unref;

	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
	return ret;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));

	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset.  LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

		/* This is not explicitly set for GEN6, so read the register.
		 * see intel_ring_mi_set_context() for why we care.
		 * TODO: consider explicitly setting the bit for GEN5
		 */
		ring->itlb_before_ctx_switch =
			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_DPF(dev))
		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	if (ring->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(sg_page(ring->scratch.obj->pages->sgl));
		i915_gem_object_ggtt_unpin(ring->scratch.obj);
	}

	drm_gem_object_unreference(&ring->scratch.obj->base);
	ring->scratch.obj = NULL;
}

static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 mmio_offset)
{
/* NB: In order to be able to do semaphore MBOX updates for varying number
 * of rings, it's easiest if we round up each individual update to a
 * multiple of 2 (since ring updates must always be a multiple of 2)
 * even though the actual update only requires 3 dwords.
 */
#define MBOX_UPDATE_DWORDS 4
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, mmio_offset);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_NOOP);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *useless;
	int i, ret, num_dwords = 4;

	if (i915_semaphore_is_enabled(dev))
		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(ring, num_dwords);
	if (ret)
		return ret;

	if (i915_semaphore_is_enabled(dev)) {
		for_each_ring(useless, dev_priv, i) {
			u32 mbox_reg = ring->signal_mbox[i];
			if (mbox_reg != GEN6_NOSYNC)
				update_mboxes(ring, mbox_reg);
		}
	}

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}
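
/*
 * Sizing check for the request above: the base request is 4 dwords
 * (MI_STORE_DWORD_INDEX, index, seqno, MI_USER_INTERRUPT); with
 * semaphores enabled, each of the other I915_NUM_RINGS-1 rings adds a
 * 4-dword mailbox update, matching the num_dwords arithmetic.
 */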

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * gen6_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
	       struct intel_ring_buffer *signaller,
	       u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(signaller->semaphore_register[waiter->id] ==
		MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter,
				dw1 |
				signaller->semaphore_register[waiter->id]);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {										\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL);					\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);		\
	intel_ring_emit(ring__, 0);						\
	intel_ring_emit(ring__, 0);						\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	__intel_ring_advance(ring);

	return 0;
}
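
/*
 * Dword budget for the request above: 4 for the first PIPE_CONTROL,
 * 6 * 4 for the PIPE_CONTROL_FLUSH sequence, and 4 for the final
 * notifying PIPE_CONTROL, which is exactly the 32 dwords reserved by
 * intel_ring_begin().
 */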

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (!lazy_coherency)
		intel_ring_get_active_head(ring);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	ring->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0)
		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0)
		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
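
/*
 * The irq get/put pairs above and below all follow the same refcounting
 * pattern under dev_priv->irq_lock: the first get unmasks the ring's
 * interrupt source, the last put masks it again. Only the register
 * being poked (GT IMR, legacy IMR, the 16-bit i8xx IMR, the per-ring
 * IMR or the PM IMR) differs per generation.
 */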

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);

	/* Flush the TLB for this page */
	if (INTEL_INFO(dev)->gen >= 6) {
		u32 reg = RING_INSTPM(ring->mmio_base);
		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  ring->name);
	}
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_PARITY_ERROR(dev)));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
		else
			I915_WRITE_IMR(ring, ~0);
		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
hsw_vebox_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
gen8_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		} else {
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen8_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		} else {
			I915_WRITE_IMR(ring, ~0);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 length,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len,
			 unsigned flags)
{
	int ret;

	if (flags & I915_DISPATCH_PINNED) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	} else {
		u32 cs_offset = ring->scratch.gtt_offset;

		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(ring, 9+3);
		if (ret)
			return ret;
		/* Blit the batch (which has now all relocs applied) to the stable batch
		 * scratch bo area (so that the CS never stumbles over its tlb
		 * invalidation bug) ... */
		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
				XY_SRC_COPY_BLT_WRITE_ALPHA |
				XY_SRC_COPY_BLT_WRITE_RGB);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);
		intel_ring_emit(ring, MI_FLUSH);

		/* ... and execute it. */
		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
		intel_ring_emit(ring, cs_offset + len - 8);
		intel_ring_advance(ring);
	}

	return 0;
}
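
/*
 * Dword check for the non-pinned path above: the XY_SRC_COPY blit that
 * copies the batch into the scratch area plus the trailing MI_FLUSH
 * takes 9 dwords, and the MI_BATCH_BUFFER that then executes the copy
 * takes 3 more, hence intel_ring_begin(ring, 9+3).
 */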

static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(sg_page(obj->pages->sgl));
	i915_gem_object_ggtt_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_MAPPABLE);
	if (ret)
		goto err_unref;

	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

static int init_phys_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!dev_priv->status_page_dmah) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
		if (!dev_priv->status_page_dmah)
			return -ENOMEM;
	}

	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	ring->size = 32 * PAGE_SIZE;
	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_status_page(ring);
		if (ret)
			return ret;
	}

	obj = NULL;
	if (!HAS_LLC(dev))
		obj = i915_gem_object_create_stolen(dev, ring->size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ring->virtual_start =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   ring->size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	iounmap(ring->virtual_start);
err_unpin:
	i915_gem_object_ggtt_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}
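
/*
 * Setup order above, for reference: the status page comes first (GTT or
 * physical, depending on I915_NEED_GFX_HWS), then the ring object itself,
 * preferring stolen memory on non-LLC parts and pinned into the mappable
 * GGTT so it can be write-combine mapped through the aperture before the
 * per-ring init hook runs.
 */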

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	iounmap(ring->virtual_start);

	i915_gem_object_ggtt_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0, tail;
	int ret;

	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;

		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		int space;

		if (request->tail == -1)
			continue;

		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
		if (space < 0)
			space += ring->size;
		if (space >= n) {
			seqno = request->seqno;
			tail = request->tail;
			break;
		}

		/* Consume this request in case we need more space than
		 * is available and so need to prevent a race between
		 * updating last_retired_head and direct reads of
		 * I915_RING_HEAD. It also provides a nice sanity check.
		 */
		request->tail = -1;
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	ring->head = tail;
	ring->space = ring_space(ring);
	if (WARN_ON(ring->space < n))
		return -ENOSPC;

	return 0;
}
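
/*
 * Example of the per-request space computation above, again assuming a
 * 32768-byte ring and I915_RING_FREE_SPACE == 64: with ring->tail == 30720
 * and a request whose tail is 2048, space = 2048 - (30720 + 64) = -28736,
 * which wraps to 4032 bytes once ring->size is added back. Waiting on that
 * request's seqno therefore frees at least 4032 bytes.
 */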

static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	/* force the tail write in case we have been skipping them */
	__intel_ring_advance(ring);

	trace_i915_ring_wait_begin(ring);
	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
		    dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			return ret;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	uint32_t __iomem *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = ring_wait_for_space(ring, rem);
		if (ret)
			return ret;
	}

	virt = ring->virtual_start + ring->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}
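
/*
 * Wrapping pads the remainder at the end of the buffer with MI_NOOP
 * dwords rather than letting execution run off the end, so the CS only
 * ever parses dwords we have written; that is why the remainder must
 * first be free before it can be noop-filled.
 */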

int intel_ring_idle(struct intel_ring_buffer *ring)
{
	u32 seqno;
	int ret;

	/* We need to add any requests required to flush the objects and ring */
	if (ring->outstanding_lazy_seqno) {
		ret = i915_add_request(ring, NULL);
		if (ret)
			return ret;
	}

	/* Wait upon the last request to be completed */
	if (list_empty(&ring->request_list))
		return 0;

	seqno = list_entry(ring->request_list.prev,
			   struct drm_i915_gem_request,
			   list)->seqno;

	return i915_wait_seqno(ring, seqno);
}
static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
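
/* Make room for @bytes of commands: wrap to the start if the write
 * would run past the effective end of the ring, then wait until enough
 * space is free.
 */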
static int __intel_ring_prepare(struct intel_ring_buffer *ring,
				int bytes)
{
	int ret;

	if (unlikely(ring->tail + bytes > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < bytes)) {
		ret = ring_wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
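
/* Reserve space for @num_dwords commands. The pattern used throughout
 * this file is:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, cmd);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */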
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ring->space -= num_dwords * sizeof(uint32_t);
	return 0;
}
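
/* Force the ring's notion of the current seqno (used when seqnos are
 * reinitialised), first zeroing the gen6+ semaphore sync registers so
 * that no stale cross-ring state survives.
 */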
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	BUG_ON(ring->outstanding_lazy_seqno);

	if (INTEL_INFO(ring->dev)->gen >= 6) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
		if (HAS_VEBOX(ring->dev))
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
	}

	ring->set_seqno(ring, seqno);
	ring->hangcheck.seqno = seqno;
}

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
			       u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}
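
/* Gen8 uses the 64-bit form of MI_BATCH_BUFFER_START: the batch address
 * is emitted as a low dword followed by a high dword (zero here), with
 * PPGTT selected via bit 8 for non-secure dispatch.
 */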
static int
gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
		!(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8));
	intel_ring_emit(ring, offset);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			     u32 offset, u32 len,
			     unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len,
			      unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
			MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	if (IS_GEN7(dev) && !invalidate && flush)
		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);

	return 0;
}
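
/* Point the render ring at the vfuncs appropriate for this hardware
 * generation, set up the CS TLB workaround where needed, and hand off
 * to the common ring initialisation.
 */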
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->flush = gen8_render_ring_flush;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
		} else {
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
		}
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
		ring->signal_mbox[RCS] = GEN6_NOSYNC;
		ring->signal_mbox[VCS] = GEN6_VRSYNC;
		ring->signal_mbox[BCS] = GEN6_BRSYNC;
		ring->signal_mbox[VECS] = GEN6_VERSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->set_seqno = pc_render_set_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (IS_GEN8(dev))
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		struct drm_i915_gem_object *obj;
		int ret;

		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->scratch.obj = obj;
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	ring->virtual_start = ioremap_wc(start, size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_bsd_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->irq_enable_mask =
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
			ring->dispatch_execbuffer =
				gen8_ring_dispatch_execbuffer;
		} else {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
			ring->dispatch_execbuffer =
				gen6_ring_dispatch_execbuffer;
		}
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
		ring->signal_mbox[RCS] = GEN6_RVSYNC;
		ring->signal_mbox[VCS] = GEN6_NOSYNC;
		ring->signal_mbox[BCS] = GEN6_BVSYNC;
		ring->signal_mbox[VECS] = GEN6_VEVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	} else {
		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	}
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
	ring->signal_mbox[RCS] = GEN6_RBSYNC;
	ring->signal_mbox[VCS] = GEN6_VBSYNC;
	ring->signal_mbox[BCS] = GEN6_NOSYNC;
	ring->signal_mbox[VECS] = GEN6_VEBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;

	ring->mmio_base = VEBOX_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	} else {
		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		ring->irq_get = hsw_vebox_get_irq;
		ring->irq_put = hsw_vebox_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	}
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[RCS] = GEN6_RVESYNC;
	ring->signal_mbox[VCS] = GEN6_VVESYNC;
	ring->signal_mbox[BCS] = GEN6_BVESYNC;
	ring->signal_mbox[VECS] = GEN6_NOSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}
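
/* If anything has dirtied the GPU caches since the last flush, emit a
 * flush of all GPU write domains.
 */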
int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}
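
/* Invalidate all GPU read domains before a new batch, also flushing any
 * dirty write domains in the same command.
 */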
int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}