intel_ringbuffer.c

/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"
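
/*
 * Note: free space in the ring, in bytes. I915_RING_FREE_SPACE is kept in
 * reserve so the software tail can never quite catch up with the hardware
 * head, which the hardware would otherwise read as a completely empty ring.
 */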
static inline int ring_space(struct intel_ring_buffer *ring)
{
        int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
        if (space < 0)
                space += ring->size;
        return space;
}
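
/*
 * Wrap the software tail around the end of the buffer and push it to the
 * hardware, unless this ring has been marked as stopped for testing via
 * gpu_error.stop_rings.
 */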
void __intel_ring_advance(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;

        ring->tail &= ring->size - 1;
        if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
                return;
        ring->write_tail(ring, ring->tail);
}

static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains,
                       u32 flush_domains)
{
        u32 cmd;
        int ret;

        cmd = MI_FLUSH;
        if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
                cmd |= MI_NO_WRITE_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                cmd |= MI_READ_FLUSH;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains,
                       u32 flush_domains)
{
        struct drm_device *dev = ring->dev;
        u32 cmd;
        int ret;

        /*
         * read/write caches:
         *
         * I915_GEM_DOMAIN_RENDER is always invalidated, but is
         * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
         * also flushed at 2d versus 3d pipeline switches.
         *
         * read-only caches:
         *
         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
         * MI_READ_FLUSH is set, and is always flushed on 965.
         *
         * I915_GEM_DOMAIN_COMMAND may not exist?
         *
         * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
         * invalidated when MI_EXE_FLUSH is set.
         *
         * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
         * invalidated with every MI_FLUSH.
         *
         * TLBs:
         *
         * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
         * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
         * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
         * are flushed at any MI_FLUSH.
         */

        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;
        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
            (IS_G4X(dev) || IS_GEN5(dev)))
                cmd |= MI_INVALIDATE_ISP;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
        u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
        intel_ring_emit(ring, 0); /* low dword */
        intel_ring_emit(ring, 0); /* high dword */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
        u32 flags = 0;
        u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;

        /* Force SNB workarounds for PIPE_CONTROL flushes */
        ret = intel_emit_post_sync_nonzero_flush(ring);
        if (ret)
                return ret;

        /* Just flush everything.  Experiments have shown that reducing the
         * number of bits based on the write domains has little performance
         * impact.
         */
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                /*
                 * Ensure that any following seqno writes only happen
                 * when the render cache is indeed flushed.
                 */
                flags |= PIPE_CONTROL_CS_STALL;
        }
        if (invalidate_domains) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
                /*
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
        }

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
{
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
{
        int ret;

        if (!ring->fbc_dirty)
                return 0;

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;
        /* WaFbcNukeOn3DBlt:ivb/hsw */
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, MSG_FBC_REND_STATE);
        intel_ring_emit(ring, value);
        intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
        intel_ring_emit(ring, MSG_FBC_REND_STATE);
        intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
        intel_ring_advance(ring);

        ring->fbc_dirty = false;
        return 0;
}

static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
        u32 flags = 0;
        u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;

        /*
         * Ensure that any following seqno writes only happen when the render
         * cache is indeed flushed.
         *
         * Workaround: 4th PIPE_CONTROL command (except the ones with only
         * read-cache invalidate bits set) must have the CS_STALL bit set. We
         * don't try to be clever and just set it unconditionally.
         */
        flags |= PIPE_CONTROL_CS_STALL;

        /* Just flush everything.  Experiments have shown that reducing the
         * number of bits based on the write domains has little performance
         * impact.
         */
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
        }
        if (invalidate_domains) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
                /*
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

                /* Workaround: we must issue a pipe_control with CS-stall bit
                 * set before a pipe_control command that has the state cache
                 * invalidate bit set. */
                gen7_render_ring_cs_stall_wa(ring);
        }

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, scratch_addr);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        if (!invalidate_domains && flush_domains)
                return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

        return 0;
}
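
/*
 * On gen8 PIPE_CONTROL grew to six dwords to make room for a wider
 * post-sync write address, hence GFX_OP_PIPE_CONTROL(6) and the extra
 * zero dwords below compared with the four-dword gen6/gen7 packet.
 */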
static int
gen8_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
        u32 flags = 0;
        u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;

        flags |= PIPE_CONTROL_CS_STALL;

        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
        }
        if (invalidate_domains) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
        }

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, scratch_addr);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
                            u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
                        RING_ACTHD(ring->mmio_base) : ACTHD;

        return I915_READ(acthd_reg);
}

static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u32 addr;

        addr = dev_priv->status_page_dmah->busaddr;
        if (INTEL_INFO(ring->dev)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
}
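
/*
 * Bring a ring up from scratch: point the hardware at the status page,
 * stop the ring and force HEAD/TAIL to zero, program the start address
 * and control register, and then verify that the ring actually started.
 */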
static int init_ring_common(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = ring->obj;
        int ret = 0;
        u32 head;

        gen6_gt_force_wake_get(dev_priv);

        if (I915_NEED_GFX_HWS(dev))
                intel_ring_setup_status_page(ring);
        else
                ring_setup_phys_status_page(ring);

        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);

        head = I915_READ_HEAD(ring) & HEAD_ADDR;

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              ring->name,
                              I915_READ_CTL(ring),
                              I915_READ_HEAD(ring),
                              I915_READ_TAIL(ring),
                              I915_READ_START(ring));

                I915_WRITE_HEAD(ring, 0);

                if (I915_READ_HEAD(ring) & HEAD_ADDR) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  ring->name,
                                  I915_READ_CTL(ring),
                                  I915_READ_HEAD(ring),
                                  I915_READ_TAIL(ring),
                                  I915_READ_START(ring));
                }
        }

        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
        I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
        I915_WRITE_CTL(ring,
                       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                       | RING_VALID);

        /* If the head is still not zero, the ring is dead */
        if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
                     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          ring->name,
                          I915_READ_CTL(ring),
                          I915_READ_HEAD(ring),
                          I915_READ_TAIL(ring),
                          I915_READ_START(ring));
                ret = -EIO;
                goto out;
        }

        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
                i915_kernel_lost_context(ring->dev);
        else {
                ring->head = I915_READ_HEAD(ring);
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring_space(ring);
                ring->last_retired_head = -1;
        }

        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
        gen6_gt_force_wake_put(dev_priv);

        return ret;
}
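
/*
 * Allocate and pin the per-ring scratch page used as the PIPE_CONTROL
 * post-sync write target (and, on Ironlake, as the seqno page read by
 * pc_render_get_seqno() below).
 */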
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
        int ret;

        if (ring->scratch.obj)
                return 0;

        ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
        if (ring->scratch.obj == NULL) {
                DRM_ERROR("Failed to allocate seqno page\n");
                ret = -ENOMEM;
                goto err;
        }

        i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);

        ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
        if (ret)
                goto err_unref;

        ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
        ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
        if (ring->scratch.cpu_page == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }

        DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
                         ring->name, ring->scratch.gtt_offset);
        return 0;

err_unpin:
        i915_gem_object_unpin(ring->scratch.obj);
err_unref:
        drm_gem_object_unreference(&ring->scratch.obj->base);
err:
        return ret;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = init_ring_common(ring);

        if (INTEL_INFO(dev)->gen > 3)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

        /* We need to disable the AsyncFlip performance optimisations in order
         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
         * programmed to '1' on all products.
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
         */
        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

        /* Required for the hardware to program scanline values for waiting */
        if (INTEL_INFO(dev)->gen == 6)
                I915_WRITE(GFX_MODE,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));

        if (IS_GEN7(dev))
                I915_WRITE(GFX_MODE_GEN7,
                           _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

        if (INTEL_INFO(dev)->gen >= 5) {
                ret = init_pipe_control(ring);
                if (ret)
                        return ret;
        }

        if (IS_GEN6(dev)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
                 *  policy. [...] This bit must be reset.  LRA replacement
                 *  policy is not supported."
                 */
                I915_WRITE(CACHE_MODE_0,
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

                /* This is not explicitly set for GEN6, so read the register.
                 * see intel_ring_mi_set_context() for why we care.
                 * TODO: consider explicitly setting the bit for GEN5
                 */
                ring->itlb_before_ctx_switch =
                        !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
        }

        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

        if (HAS_L3_DPF(dev))
                I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

        return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;

        if (ring->scratch.obj == NULL)
                return;

        if (INTEL_INFO(dev)->gen >= 5) {
                kunmap(sg_page(ring->scratch.obj->pages->sgl));
                i915_gem_object_unpin(ring->scratch.obj);
        }

        drm_gem_object_unreference(&ring->scratch.obj->base);
        ring->scratch.obj = NULL;
}

static void
update_mboxes(struct intel_ring_buffer *ring,
              u32 mmio_offset)
{
/* NB: In order to be able to do semaphore MBOX updates for varying number
 * of rings, it's easiest if we round up each individual update to a
 * multiple of 2 (since ring updates must always be a multiple of 2)
 * even though the actual update only requires 3 dwords.
 */
#define MBOX_UPDATE_DWORDS 4
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, mmio_offset);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_NOOP);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *useless;
        int i, ret;

        ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
                                      MBOX_UPDATE_DWORDS) +
                                      4);
        if (ret)
                return ret;
#undef MBOX_UPDATE_DWORDS

        for_each_ring(useless, dev_priv, i) {
                u32 mbox_reg = ring->signal_mbox[i];
                if (mbox_reg != GEN6_NOSYNC)
                        update_mboxes(ring, mbox_reg);
        }

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);

        return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
                                              u32 seqno)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
               struct intel_ring_buffer *signaller,
               u32 seqno)
{
        int ret;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;

        /* Throughout all of the GEM code, seqno passed implies our current
         * seqno is >= the last seqno executed. However for hardware the
         * comparison is strictly greater than.
         */
        seqno -= 1;

        WARN_ON(signaller->semaphore_register[waiter->id] ==
                MI_SEMAPHORE_SYNC_INVALID);

        ret = intel_ring_begin(waiter, 4);
        if (ret)
                return ret;

        /* If seqno wrap happened, omit the wait with no-ops */
        if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
                intel_ring_emit(waiter,
                                dw1 |
                                signaller->semaphore_register[waiter->id]);
                intel_ring_emit(waiter, seqno);
                intel_ring_emit(waiter, 0);
                intel_ring_emit(waiter, MI_NOOP);
        } else {
                intel_ring_emit(waiter, MI_NOOP);
                intel_ring_emit(waiter, MI_NOOP);
                intel_ring_emit(waiter, MI_NOOP);
                intel_ring_emit(waiter, MI_NOOP);
        }
        intel_ring_advance(waiter);

        return 0;
}
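
/*
 * Emit a four-dword PIPE_CONTROL that performs a depth-stalling qword
 * write to the given scratch address; used by pc_render_add_request()
 * below to flush the Ironlake PIPE_NOTIFY write buffers.
 */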
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
        intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
                        PIPE_CONTROL_DEPTH_STALL); \
        intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
        intel_ring_emit(ring__, 0); \
        intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
        u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;

        /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
         * incoherent with writes to memory, i.e. completely fubar,
         * so we need to use PIPE_NOTIFY instead.
         *
         * However, we also need to workaround the qword write
         * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
         * memory before requesting an interrupt.
         */
        ret = intel_ring_begin(ring, 32);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128; /* write to separate cachelines */
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
        __intel_ring_advance(ring);

        return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
         * ACTHD) before reading the status page. */
        if (!lazy_coherency)
                intel_ring_get_active_head(ring);
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
        intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
        return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
        ring->scratch.cpu_page[0] = seqno;
}
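
/*
 * The get/put_irq pairs below reference-count user-interrupt enabling
 * under dev_priv->irq_lock: the interrupt source is unmasked on the first
 * get and masked again when the last reference is dropped.
 */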
static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0)
                ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0)
                ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 mmio = 0;

        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
        if (IS_GEN7(dev)) {
                switch (ring->id) {
                case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
                case BCS:
                        mmio = BLT_HWS_PGA_GEN7;
                        break;
                case VCS:
                        mmio = BSD_HWS_PGA_GEN7;
                        break;
                case VECS:
                        mmio = VEBOX_HWS_PGA_GEN7;
                        break;
                }
        } else if (IS_GEN6(ring->dev)) {
                mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
        } else {
                /* XXX: gen8 returns to sanity */
                mmio = RING_HWS_PGA(ring->mmio_base);
        }

        I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
        POSTING_READ(mmio);

        /* Flush the TLB for this page */
        if (INTEL_INFO(dev)->gen >= 6) {
                u32 reg = RING_INSTPM(ring->mmio_base);
                I915_WRITE(reg,
                           _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
                                              INSTPM_SYNC_FLUSH));
                if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
                             1000))
                        DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
                                  ring->name);
        }
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
               u32 invalidate_domains,
               u32 flush_domains)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_FLUSH);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static int
i9xx_add_request(struct intel_ring_buffer *ring)
{
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);

        return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                if (HAS_L3_DPF(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~(ring->irq_enable_mask |
                                         GT_PARITY_ERROR(dev)));
                else
                        I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                if (HAS_L3_DPF(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
                else
                        I915_WRITE_IMR(ring, ~0);
                ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
hsw_vebox_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
hsw_vebox_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                I915_WRITE_IMR(ring, ~0);
                snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
gen8_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                if (HAS_L3_DPF(dev) && ring->id == RCS) {
                        I915_WRITE_IMR(ring,
                                       ~(ring->irq_enable_mask |
                                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
                } else {
                        I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                }
                POSTING_READ(RING_IMR(ring->mmio_base));
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
gen8_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                if (HAS_L3_DPF(dev) && ring->id == RCS) {
                        I915_WRITE_IMR(ring,
                                       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
                } else {
                        I915_WRITE_IMR(ring, ~0);
                }
                POSTING_READ(RING_IMR(ring->mmio_base));
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
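
/*
 * Batch buffer dispatch: emit MI_BATCH_BUFFER_START (or the older
 * MI_BATCH_BUFFER packet) pointing at the user batch in the GTT. The
 * MI_BATCH_NON_SECURE bit is set unless the caller requested a secure
 * (privileged) dispatch.
 */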
static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
                         u32 offset, u32 length,
                         unsigned flags)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        MI_BATCH_GTT |
                        (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                         u32 offset, u32 len,
                         unsigned flags)
{
        int ret;

        if (flags & I915_DISPATCH_PINNED) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
                        return ret;

                intel_ring_emit(ring, MI_BATCH_BUFFER);
                intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
                intel_ring_emit(ring, offset + len - 8);
                intel_ring_emit(ring, MI_NOOP);
                intel_ring_advance(ring);
        } else {
                u32 cs_offset = ring->scratch.gtt_offset;

                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;

                ret = intel_ring_begin(ring, 9+3);
                if (ret)
                        return ret;
                /* Blit the batch (which has now all relocs applied) to the stable batch
                 * scratch bo area (so that the CS never stumbles over its tlb
                 * invalidation bug) ... */
                intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
                                XY_SRC_COPY_BLT_WRITE_ALPHA |
                                XY_SRC_COPY_BLT_WRITE_RGB);
                intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
                intel_ring_emit(ring, 0);
                intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
                intel_ring_emit(ring, cs_offset);
                intel_ring_emit(ring, 0);
                intel_ring_emit(ring, 4096);
                intel_ring_emit(ring, offset);
                intel_ring_emit(ring, MI_FLUSH);

                /* ... and execute it. */
                intel_ring_emit(ring, MI_BATCH_BUFFER);
                intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
                intel_ring_emit(ring, cs_offset + len - 8);
                intel_ring_advance(ring);
        }

        return 0;
}

static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
                         u32 offset, u32 len,
                         unsigned flags)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
        intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
        intel_ring_advance(ring);

        return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;

        kunmap(sg_page(obj->pages->sgl));
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *obj;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }

        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

        ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
        if (ret != 0)
                goto err_unref;

        ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
        ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
        if (ring->status_page.page_addr == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                         ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
err:
        return ret;
}

static int init_phys_status_page(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;

        if (!dev_priv->status_page_dmah) {
                dev_priv->status_page_dmah =
                        drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
                if (!dev_priv->status_page_dmah)
                        return -ENOMEM;
        }

        ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        return 0;
}
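
/*
 * Common constructor for all engines: set up the status page, allocate
 * and pin the ring object (preferring stolen memory when there is no LLC),
 * map it write-combined through the GTT aperture, and run the
 * engine-specific init hook.
 */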
static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        ring->size = 32 * PAGE_SIZE;
        memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));

        init_waitqueue_head(&ring->irq_queue);

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
                if (ret)
                        return ret;
        } else {
                BUG_ON(ring->id != RCS);
                ret = init_phys_status_page(ring);
                if (ret)
                        return ret;
        }

        obj = NULL;
        if (!HAS_LLC(dev))
                obj = i915_gem_object_create_stolen(dev, ring->size);
        if (obj == NULL)
                obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto err_hws;
        }

        ring->obj = obj;

        ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
        if (ret)
                goto err_unref;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto err_unpin;

        ring->virtual_start =
                ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                           ring->size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }

        ret = ring->init(ring);
        if (ret)
                goto err_unmap;

        /* Workaround an erratum on the i830 which causes a hang if
         * the TAIL pointer points to within the last 2 cachelines
         * of the buffer.
         */
        ring->effective_size = ring->size;
        if (IS_I830(ring->dev) || IS_845G(ring->dev))
                ring->effective_size -= 128;

        return 0;

err_unmap:
        iounmap(ring->virtual_start);
err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
        ring->obj = NULL;
err_hws:
        cleanup_status_page(ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv;
        int ret;

        if (ring->obj == NULL)
                return;

        /* Disable the ring buffer. The ring must be idle at this point */
        dev_priv = ring->dev->dev_private;
        ret = intel_ring_idle(ring);
        if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);

        I915_WRITE_CTL(ring, 0);

        iounmap(ring->virtual_start);

        i915_gem_object_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
        ring->obj = NULL;
        ring->preallocated_lazy_request = NULL;
        ring->outstanding_lazy_seqno = 0;

        if (ring->cleanup)
                ring->cleanup(ring);

        cleanup_status_page(ring);
}
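
/*
 * Space reclamation: before busy-waiting on the hardware HEAD, try to
 * free ring space by retiring completed requests, walking the request
 * list for the first request whose retirement would release at least
 * n bytes.
 */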
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
        int ret;

        ret = i915_wait_seqno(ring, seqno);
        if (!ret)
                i915_gem_retire_requests_ring(ring);

        return ret;
}

static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
        struct drm_i915_gem_request *request;
        u32 seqno = 0;
        int ret;

        i915_gem_retire_requests_ring(ring);

        if (ring->last_retired_head != -1) {
                ring->head = ring->last_retired_head;
                ring->last_retired_head = -1;
                ring->space = ring_space(ring);
                if (ring->space >= n)
                        return 0;
        }

        list_for_each_entry(request, &ring->request_list, list) {
                int space;

                if (request->tail == -1)
                        continue;

                space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
                if (space < 0)
                        space += ring->size;
                if (space >= n) {
                        seqno = request->seqno;
                        break;
                }

                /* Consume this request in case we need more space than
                 * is available and so need to prevent a race between
                 * updating last_retired_head and direct reads of
                 * I915_RING_HEAD. It also provides a nice sanity check.
                 */
                request->tail = -1;
        }

        if (seqno == 0)
                return -ENOSPC;

        ret = intel_ring_wait_seqno(ring, seqno);
        if (ret)
                return ret;

        if (WARN_ON(ring->last_retired_head == -1))
                return -ENOSPC;

        ring->head = ring->last_retired_head;
        ring->last_retired_head = -1;
        ring->space = ring_space(ring);
        if (WARN_ON(ring->space < n))
                return -ENOSPC;

        return 0;
}

static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long end;
        int ret;

        ret = intel_ring_wait_request(ring, n);
        if (ret != -ENOSPC)
                return ret;

        /* force the tail write in case we have been skipping them */
        __intel_ring_advance(ring);

        trace_i915_ring_wait_begin(ring);
        /* With GEM the hangcheck timer should kick us out of the loop,
         * leaving it early runs the risk of corrupting GEM state (due
         * to running on almost untested codepaths). But on resume
         * timers don't work yet, so prevent a complete hang in that
         * case by choosing an insanely large timeout. */
        end = jiffies + 60 * HZ;
        do {
                ring->head = I915_READ_HEAD(ring);
                ring->space = ring_space(ring);
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(ring);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                msleep(1);

                ret = i915_gem_check_wedge(&dev_priv->gpu_error,
                                           dev_priv->mm.interruptible);
                if (ret)
                        return ret;
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(ring);
        return -EBUSY;
}
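
/*
 * Commands may not straddle the end of the ring: when an emission would
 * wrap, fill the remainder of the buffer with MI_NOOPs and restart at
 * offset zero.
 */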
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
        uint32_t __iomem *virt;
        int rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = ring_wait_for_space(ring, rem);
                if (ret)
                        return ret;
        }

        virt = ring->virtual_start + ring->tail;
        rem /= 4;
        while (rem--)
                iowrite32(MI_NOOP, virt++);

        ring->tail = 0;
        ring->space = ring_space(ring);

        return 0;
}

int intel_ring_idle(struct intel_ring_buffer *ring)
{
        u32 seqno;
        int ret;

        /* We need to add any requests required to flush the objects and ring */
        if (ring->outstanding_lazy_seqno) {
                ret = i915_add_request(ring, NULL);
                if (ret)
                        return ret;
        }

        /* Wait upon the last request to be completed */
        if (list_empty(&ring->request_list))
                return 0;

        seqno = list_entry(ring->request_list.prev,
                           struct drm_i915_gem_request,
                           list)->seqno;

        return i915_wait_seqno(ring, seqno);
}
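
/*
 * Lazily reserve a seqno, and the request structure that will carry it,
 * before any commands touch the ring, so that emission cannot fail with
 * -ENOMEM after dwords have already been written.
 */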
static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

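/*
 * Common part of intel_ring_begin(): wrap to the start of the ring if
 * the write would run past effective_size, then wait until at least
 * @bytes of space are free.
 */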
static int __intel_ring_begin(struct intel_ring_buffer *ring,
			      int bytes)
{
	int ret;

	if (unlikely(ring->tail + bytes > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < bytes)) {
		ret = ring_wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= bytes;
	return 0;
}

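/*
 * intel_ring_begin - reserve space in the ring for new commands
 *
 * Checks for a wedged GPU and reserves a seqno before touching the
 * ring, so that the subsequent emits cannot fail. An illustrative
 * sketch of the usual emit pattern (callers pad with MI_NOOP so that
 * exactly the reserved number of dwords is written):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */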
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
}

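/*
 * intel_ring_init_seqno - reset the ring's seqno bookkeeping, e.g.
 * after a GPU reset or around a seqno wrap. On gen6+ the semaphore
 * sync registers are cleared as well, since they record seqnos from
 * the old seqno space.
 */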
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	BUG_ON(ring->outstanding_lazy_seqno);

	if (INTEL_INFO(ring->dev)->gen >= 6) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
		if (HAS_VEBOX(ring->dev))
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
	}

	ring->set_seqno(ring, seqno);
	ring->hangcheck.seqno = seqno;
}

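/*
 * Gen6 BSD tail write workaround: the ring must be awake (out of RC6)
 * before the tail pointer is written, so temporarily disable the IDLE
 * messaging, wait for the ring to wake up, write the tail and then
 * re-enable sleep notifications.
 */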
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

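/*
 * MI_FLUSH_DW based flush for the video engine. On gen8 the post-sync
 * address is 64 bits wide, hence the extra length dword (cmd += 1) and
 * the "upper addr" emit below.
 */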
static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
			       u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}

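/*
 * Batch dispatch on gen8: bit 8 of MI_BATCH_BUFFER_START selects the
 * PPGTT address space, so batches run from the aliasing PPGTT unless a
 * secure (GGTT) dispatch was requested.
 */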
static int
gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
		!(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_ring_emit(ring, offset);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

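/*
 * Haswell uses its own non-secure bit (MI_BATCH_NON_SECURE_HSW) and a
 * PPGTT selector in MI_BATCH_BUFFER_START, hence the dedicated
 * dispatch function; the gen6 variant below differs only in the flags
 * it sets.
 */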
static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			     u32 offset, u32 len,
			     unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len,
			      unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
			MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	if (IS_GEN7(dev) && !invalidate && flush)
		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);

	return 0;
}

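/*
 * The intel_init_*_ring_buffer() functions below wire up the
 * generation-specific vfuncs (flush, add_request, seqno handling, irqs
 * and batch dispatch) for each engine before handing the ring to
 * intel_init_ring_buffer() for the common setup.
 */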
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->flush = gen8_render_ring_flush;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
		} else {
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
		}
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
		ring->signal_mbox[RCS] = GEN6_NOSYNC;
		ring->signal_mbox[VCS] = GEN6_VRSYNC;
		ring->signal_mbox[BCS] = GEN6_BRSYNC;
		ring->signal_mbox[VECS] = GEN6_VERSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->set_seqno = pc_render_set_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (IS_GEN8(dev))
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;
	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		struct drm_i915_gem_object *obj;
		int ret;

		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->scratch.obj = obj;
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	return intel_init_ring_buffer(dev, ring);
}

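/*
 * Legacy (non-kms/DRI1) render ring setup: the ring memory is provided
 * by the caller and mapped here, instead of being allocated through
 * GEM as in the kms path.
 */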
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;
	ring->virtual_start = ioremap_wc(start, size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_bsd_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->irq_enable_mask =
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
			ring->dispatch_execbuffer =
				gen8_ring_dispatch_execbuffer;
		} else {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
			ring->dispatch_execbuffer =
				gen6_ring_dispatch_execbuffer;
		}
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
		ring->signal_mbox[RCS] = GEN6_RVSYNC;
		ring->signal_mbox[VCS] = GEN6_NOSYNC;
		ring->signal_mbox[BCS] = GEN6_BVSYNC;
		ring->signal_mbox[VECS] = GEN6_VEVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	} else {
		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	}
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
	ring->signal_mbox[RCS] = GEN6_RBSYNC;
	ring->signal_mbox[VCS] = GEN6_VBSYNC;
	ring->signal_mbox[BCS] = GEN6_NOSYNC;
	ring->signal_mbox[VECS] = GEN6_VEBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;

	ring->mmio_base = VEBOX_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	} else {
		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		ring->irq_get = hsw_vebox_get_irq;
		ring->irq_put = hsw_vebox_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	}
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[RCS] = GEN6_RVESYNC;
	ring->signal_mbox[VCS] = GEN6_VVESYNC;
	ring->signal_mbox[BCS] = GEN6_BVESYNC;
	ring->signal_mbox[VECS] = GEN6_NOSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

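/*
 * Emit a flush of all GPU write domains, but only if the ring actually
 * has dirty caches from a previous batch.
 */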
int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

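/*
 * Invalidate all GPU read domains before executing a new batch; if the
 * caches are also dirty, flush the write domains in the same command.
 */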
int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}