intel_ringbuffer.c 59 KB

/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200
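/*
 * Free space remaining in the ring: the distance from the consumer (head)
 * back round to the producer (tail/emit), modulo the ring size, less
 * I915_RING_FREE_SPACE, the small gap the driver always keeps so that
 * TAIL never catches up to HEAD and a full ring is never reported.
 */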
static int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ring *ring)
{
	ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
}

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);
	return 0;
}
static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND and
	 * I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */
	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);
	return 0;
}
/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
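/*
 * gen6 flush: first emit the SNB post-sync workaround sequence above, then
 * a single PIPE_CONTROL carrying the requested flush/invalidate bits and a
 * QW write to the scratch page (TLB invalidation needs a post-sync write).
 */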
static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}
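/*
 * Helper used by gen7_render_ring_flush() below: a bare PIPE_CONTROL with
 * only CS-stall and stall-at-scoreboard set, issued before a PIPE_CONTROL
 * that carries the state-cache-invalidate bit.
 */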
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	u32 *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
		 */
		gen7_render_ring_cs_stall_wa(req);
	}

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}
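/*
 * gen8 flush: built from 6-dword PIPE_CONTROLs via gen8_emit_pipe_control().
 * When invalidating, a CS-stall-only PIPE_CONTROL is emitted first
 * (WaCsStallBeforeStateCacheInvalidate) before the PIPE_CONTROL that carries
 * the flush/invalidate bits and the post-sync write to the scratch page.
 */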
static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 flags;
	u32 *cs;

	cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	flags = PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		cs = gen8_emit_pipe_control(cs,
					    PIPE_CONTROL_CS_STALL |
					    PIPE_CONTROL_STALL_AT_SCOREBOARD,
					    0);
	}

	cs = gen8_emit_pipe_control(cs, flags,
				    i915_ggtt_offset(req->engine->scratch) +
				    2 * CACHELINE_BYTES);

	intel_ring_advance(req, cs);

	return 0;
}
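/*
 * Status page setup: engines that need a physical status page program its
 * bus address into HWS_PGA; the others point the per-engine HWS register
 * at the GGTT offset of the status page vma (see init_ring_common() below).
 */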
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 doesn't actually exist on Gen7; the case is only
		 * here to silence gcc's switch check warning.
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, engine->status_page.ggtt_offset);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}
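/*
 * Ask the engine to stop (MI_MODE STOP_RING), wait for it to report idle,
 * then clear the ring CTL/HEAD/TAIL registers. Returns true if HEAD ended
 * up back at zero, i.e. the ring really is stopped and empty.
 */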
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	if (INTEL_GEN(dev_priv) > 2) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
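/*
 * Common legacy ring (re)initialisation: stop the ring, program the status
 * page, then restore START/HEAD/TAIL and enable the ring via RING_CTL,
 * waiting for the hardware to report RING_VALID before declaring success.
 */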
  419. static int init_ring_common(struct intel_engine_cs *engine)
  420. {
  421. struct drm_i915_private *dev_priv = engine->i915;
  422. struct intel_ring *ring = engine->buffer;
  423. int ret = 0;
  424. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  425. if (!stop_ring(engine)) {
  426. /* G45 ring initialization often fails to reset head to zero */
  427. DRM_DEBUG_KMS("%s head not reset to zero "
  428. "ctl %08x head %08x tail %08x start %08x\n",
  429. engine->name,
  430. I915_READ_CTL(engine),
  431. I915_READ_HEAD(engine),
  432. I915_READ_TAIL(engine),
  433. I915_READ_START(engine));
  434. if (!stop_ring(engine)) {
  435. DRM_ERROR("failed to set %s head to zero "
  436. "ctl %08x head %08x tail %08x start %08x\n",
  437. engine->name,
  438. I915_READ_CTL(engine),
  439. I915_READ_HEAD(engine),
  440. I915_READ_TAIL(engine),
  441. I915_READ_START(engine));
  442. ret = -EIO;
  443. goto out;
  444. }
  445. }
  446. if (HWS_NEEDS_PHYSICAL(dev_priv))
  447. ring_setup_phys_status_page(engine);
  448. else
  449. intel_ring_setup_status_page(engine);
  450. intel_engine_reset_breadcrumbs(engine);
  451. /* Enforce ordering by reading HEAD register back */
  452. I915_READ_HEAD(engine);
  453. /* Initialize the ring. This must happen _after_ we've cleared the ring
  454. * registers with the above sequence (the readback of the HEAD registers
  455. * also enforces ordering), otherwise the hw might lose the new ring
  456. * register values. */
  457. I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
  458. /* WaClearRingBufHeadRegAtInit:ctg,elk */
  459. if (I915_READ_HEAD(engine))
  460. DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
  461. engine->name, I915_READ_HEAD(engine));
  462. intel_ring_update_space(ring);
  463. I915_WRITE_HEAD(engine, ring->head);
  464. I915_WRITE_TAIL(engine, ring->tail);
  465. (void)I915_READ_TAIL(engine);
  466. I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
  467. /* If the head is still not zero, the ring is dead */
  468. if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
  469. RING_VALID, RING_VALID,
  470. 50)) {
  471. DRM_ERROR("%s initialization failed "
  472. "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
  473. engine->name,
  474. I915_READ_CTL(engine),
  475. I915_READ_CTL(engine) & RING_VALID,
  476. I915_READ_HEAD(engine), ring->head,
  477. I915_READ_TAIL(engine), ring->tail,
  478. I915_READ_START(engine),
  479. i915_ggtt_offset(ring->vma));
  480. ret = -EIO;
  481. goto out;
  482. }
  483. intel_engine_init_hangcheck(engine);
  484. out:
  485. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  486. return ret;
  487. }
  488. static void reset_ring_common(struct intel_engine_cs *engine,
  489. struct drm_i915_gem_request *request)
  490. {
  491. /* Try to restore the logical GPU state to match the continuation
  492. * of the request queue. If we skip the context/PD restore, then
  493. * the next request may try to execute assuming that its context
  494. * is valid and loaded on the GPU and so may try to access invalid
  495. * memory, prompting repeated GPU hangs.
  496. *
  497. * If the request was guilty, we still restore the logical state
  498. * in case the next request requires it (e.g. the aliasing ppgtt),
  499. * but skip over the hung batch.
  500. *
  501. * If the request was innocent, we try to replay the request with
  502. * the restored context.
  503. */
  504. if (request) {
  505. struct drm_i915_private *dev_priv = request->i915;
  506. struct intel_context *ce = &request->ctx->engine[engine->id];
  507. struct i915_hw_ppgtt *ppgtt;
  508. /* FIXME consider gen8 reset */
  509. if (ce->state) {
  510. I915_WRITE(CCID,
  511. i915_ggtt_offset(ce->state) |
  512. BIT(8) /* must be set! */ |
  513. CCID_EXTENDED_STATE_SAVE |
  514. CCID_EXTENDED_STATE_RESTORE |
  515. CCID_EN);
  516. }
  517. ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
  518. if (ppgtt) {
  519. u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
  520. I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
  521. I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
  522. /* Wait for the PD reload to complete */
  523. if (intel_wait_for_register(dev_priv,
  524. RING_PP_DIR_BASE(engine),
  525. BIT(0), 0,
  526. 10))
  527. DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
  528. ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
  529. }
  530. /* If the rq hung, jump to its breadcrumb and skip the batch */
  531. if (request->fence.error == -EIO)
  532. request->ring->head = request->postfix;
  533. } else {
  534. engine->legacy_active_context = NULL;
  535. }
  536. }
  537. static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
  538. {
  539. int ret;
  540. ret = intel_ring_workarounds_emit(req);
  541. if (ret != 0)
  542. return ret;
  543. ret = i915_gem_render_state_emit(req);
  544. if (ret)
  545. return ret;
  546. return 0;
  547. }
  548. static int init_render_ring(struct intel_engine_cs *engine)
  549. {
  550. struct drm_i915_private *dev_priv = engine->i915;
  551. int ret = init_ring_common(engine);
  552. if (ret)
  553. return ret;
  554. /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
  555. if (IS_GEN(dev_priv, 4, 6))
  556. I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
  557. /* We need to disable the AsyncFlip performance optimisations in order
  558. * to use MI_WAIT_FOR_EVENT within the CS. It should already be
  559. * programmed to '1' on all products.
  560. *
  561. * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
  562. */
  563. if (IS_GEN(dev_priv, 6, 7))
  564. I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
  565. /* Required for the hardware to program scanline values for waiting */
  566. /* WaEnableFlushTlbInvalidationMode:snb */
  567. if (IS_GEN6(dev_priv))
  568. I915_WRITE(GFX_MODE,
  569. _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
  570. /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
  571. if (IS_GEN7(dev_priv))
  572. I915_WRITE(GFX_MODE_GEN7,
  573. _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
  574. _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
  575. if (IS_GEN6(dev_priv)) {
  576. /* From the Sandybridge PRM, volume 1 part 3, page 24:
  577. * "If this bit is set, STCunit will have LRA as replacement
  578. * policy. [...] This bit must be reset. LRA replacement
  579. * policy is not supported."
  580. */
  581. I915_WRITE(CACHE_MODE_0,
  582. _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
  583. }
  584. if (IS_GEN(dev_priv, 6, 7))
  585. I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
  586. if (INTEL_INFO(dev_priv)->gen >= 6)
  587. I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
  588. return init_workarounds_ring(engine);
  589. }
  590. static void render_ring_cleanup(struct intel_engine_cs *engine)
  591. {
  592. struct drm_i915_private *dev_priv = engine->i915;
  593. i915_vma_unpin_and_release(&dev_priv->semaphore);
  594. }
  595. static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
  596. {
  597. struct drm_i915_private *dev_priv = req->i915;
  598. struct intel_engine_cs *waiter;
  599. enum intel_engine_id id;
  600. for_each_engine(waiter, dev_priv, id) {
  601. u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
  602. if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
  603. continue;
  604. *cs++ = GFX_OP_PIPE_CONTROL(6);
  605. *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
  606. PIPE_CONTROL_CS_STALL;
  607. *cs++ = lower_32_bits(gtt_offset);
  608. *cs++ = upper_32_bits(gtt_offset);
  609. *cs++ = req->global_seqno;
  610. *cs++ = 0;
  611. *cs++ = MI_SEMAPHORE_SIGNAL |
  612. MI_SEMAPHORE_TARGET(waiter->hw_id);
  613. *cs++ = 0;
  614. }
  615. return cs;
  616. }
  617. static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
  618. {
  619. struct drm_i915_private *dev_priv = req->i915;
  620. struct intel_engine_cs *waiter;
  621. enum intel_engine_id id;
  622. for_each_engine(waiter, dev_priv, id) {
  623. u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
  624. if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
  625. continue;
  626. *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
  627. *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
  628. *cs++ = upper_32_bits(gtt_offset);
  629. *cs++ = req->global_seqno;
  630. *cs++ = MI_SEMAPHORE_SIGNAL |
  631. MI_SEMAPHORE_TARGET(waiter->hw_id);
  632. *cs++ = 0;
  633. }
  634. return cs;
  635. }
  636. static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
  637. {
  638. struct drm_i915_private *dev_priv = req->i915;
  639. struct intel_engine_cs *engine;
  640. enum intel_engine_id id;
  641. int num_rings = 0;
  642. for_each_engine(engine, dev_priv, id) {
  643. i915_reg_t mbox_reg;
  644. if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
  645. continue;
  646. mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
  647. if (i915_mmio_reg_valid(mbox_reg)) {
  648. *cs++ = MI_LOAD_REGISTER_IMM(1);
  649. *cs++ = i915_mmio_reg_offset(mbox_reg);
  650. *cs++ = req->global_seqno;
  651. num_rings++;
  652. }
  653. }
  654. if (num_rings & 1)
  655. *cs++ = MI_NOOP;
  656. return cs;
  657. }
  658. static void i9xx_submit_request(struct drm_i915_gem_request *request)
  659. {
  660. struct drm_i915_private *dev_priv = request->i915;
  661. i915_gem_request_submit(request);
  662. I915_WRITE_TAIL(request->engine,
  663. intel_ring_set_tail(request->ring, request->tail));
  664. }
  665. static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
  666. {
  667. *cs++ = MI_STORE_DWORD_INDEX;
  668. *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
  669. *cs++ = req->global_seqno;
  670. *cs++ = MI_USER_INTERRUPT;
  671. req->tail = intel_ring_offset(req, cs);
  672. assert_ring_tail_valid(req->ring, req->tail);
  673. }
  674. static const int i9xx_emit_breadcrumb_sz = 4;
  675. /**
  676. * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
  677. *
  678. * @request - request to write to the ring
  679. *
  680. * Update the mailbox registers in the *other* rings with the current seqno.
  681. * This acts like a signal in the canonical semaphore.
  682. */
  683. static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
  684. {
  685. return i9xx_emit_breadcrumb(req,
  686. req->engine->semaphore.signal(req, cs));
  687. }
  688. static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
  689. u32 *cs)
  690. {
  691. struct intel_engine_cs *engine = req->engine;
  692. if (engine->semaphore.signal)
  693. cs = engine->semaphore.signal(req, cs);
  694. *cs++ = GFX_OP_PIPE_CONTROL(6);
  695. *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
  696. PIPE_CONTROL_QW_WRITE;
  697. *cs++ = intel_hws_seqno_address(engine);
  698. *cs++ = 0;
  699. *cs++ = req->global_seqno;
  700. /* We're thrashing one dword of HWS. */
  701. *cs++ = 0;
  702. *cs++ = MI_USER_INTERRUPT;
  703. *cs++ = MI_NOOP;
  704. req->tail = intel_ring_offset(req, cs);
  705. assert_ring_tail_valid(req->ring, req->tail);
  706. }
  707. static const int gen8_render_emit_breadcrumb_sz = 8;
  708. /**
  709. * intel_ring_sync - sync the waiter to the signaller on seqno
  710. *
  711. * @waiter - ring that is waiting
  712. * @signaller - ring which has, or will signal
  713. * @seqno - seqno which the waiter will block on
  714. */
  715. static int
  716. gen8_ring_sync_to(struct drm_i915_gem_request *req,
  717. struct drm_i915_gem_request *signal)
  718. {
  719. struct drm_i915_private *dev_priv = req->i915;
  720. u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
  721. struct i915_hw_ppgtt *ppgtt;
  722. u32 *cs;
  723. cs = intel_ring_begin(req, 4);
  724. if (IS_ERR(cs))
  725. return PTR_ERR(cs);
  726. *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
  727. MI_SEMAPHORE_SAD_GTE_SDD;
  728. *cs++ = signal->global_seqno;
  729. *cs++ = lower_32_bits(offset);
  730. *cs++ = upper_32_bits(offset);
  731. intel_ring_advance(req, cs);
  732. /* When the !RCS engines idle waiting upon a semaphore, they lose their
  733. * pagetables and we must reload them before executing the batch.
  734. * We do this on the i915_switch_context() following the wait and
  735. * before the dispatch.
  736. */
  737. ppgtt = req->ctx->ppgtt;
  738. if (ppgtt && req->engine->id != RCS)
  739. ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
  740. return 0;
  741. }
  742. static int
  743. gen6_ring_sync_to(struct drm_i915_gem_request *req,
  744. struct drm_i915_gem_request *signal)
  745. {
  746. u32 dw1 = MI_SEMAPHORE_MBOX |
  747. MI_SEMAPHORE_COMPARE |
  748. MI_SEMAPHORE_REGISTER;
  749. u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
  750. u32 *cs;
  751. WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
  752. cs = intel_ring_begin(req, 4);
  753. if (IS_ERR(cs))
  754. return PTR_ERR(cs);
  755. *cs++ = dw1 | wait_mbox;
  756. /* Throughout all of the GEM code, seqno passed implies our current
  757. * seqno is >= the last seqno executed. However for hardware the
  758. * comparison is strictly greater than.
  759. */
  760. *cs++ = signal->global_seqno - 1;
  761. *cs++ = 0;
  762. *cs++ = MI_NOOP;
  763. intel_ring_advance(req, cs);
  764. return 0;
  765. }
  766. static void
  767. gen5_seqno_barrier(struct intel_engine_cs *engine)
  768. {
  769. /* MI_STORE are internally buffered by the GPU and not flushed
  770. * either by MI_FLUSH or SyncFlush or any other combination of
  771. * MI commands.
  772. *
  773. * "Only the submission of the store operation is guaranteed.
  774. * The write result will be complete (coherent) some time later
  775. * (this is practically a finite period but there is no guaranteed
  776. * latency)."
  777. *
  778. * Empirically, we observe that we need a delay of at least 75us to
  779. * be sure that the seqno write is visible by the CPU.
  780. */
  781. usleep_range(125, 250);
  782. }
  783. static void
  784. gen6_seqno_barrier(struct intel_engine_cs *engine)
  785. {
  786. struct drm_i915_private *dev_priv = engine->i915;
  787. /* Workaround to force correct ordering between irq and seqno writes on
  788. * ivb (and maybe also on snb) by reading from a CS register (like
  789. * ACTHD) before reading the status page.
  790. *
  791. * Note that this effectively stalls the read by the time it takes to
  792. * do a memory transaction, which more or less ensures that the write
  793. * from the GPU has sufficient time to invalidate the CPU cacheline.
  794. * Alternatively we could delay the interrupt from the CS ring to give
  795. * the write time to land, but that would incur a delay after every
  796. * batch i.e. much more frequent than a delay when waiting for the
  797. * interrupt (with the same net latency).
  798. *
  799. * Also note that to prevent whole machine hangs on gen7, we have to
  800. * take the spinlock to guard against concurrent cacheline access.
  801. */
  802. spin_lock_irq(&dev_priv->uncore.lock);
  803. POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
  804. spin_unlock_irq(&dev_priv->uncore.lock);
  805. }
  806. static void
  807. gen5_irq_enable(struct intel_engine_cs *engine)
  808. {
  809. gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
  810. }
  811. static void
  812. gen5_irq_disable(struct intel_engine_cs *engine)
  813. {
  814. gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
  815. }
  816. static void
  817. i9xx_irq_enable(struct intel_engine_cs *engine)
  818. {
  819. struct drm_i915_private *dev_priv = engine->i915;
  820. dev_priv->irq_mask &= ~engine->irq_enable_mask;
  821. I915_WRITE(IMR, dev_priv->irq_mask);
  822. POSTING_READ_FW(RING_IMR(engine->mmio_base));
  823. }
  824. static void
  825. i9xx_irq_disable(struct intel_engine_cs *engine)
  826. {
  827. struct drm_i915_private *dev_priv = engine->i915;
  828. dev_priv->irq_mask |= engine->irq_enable_mask;
  829. I915_WRITE(IMR, dev_priv->irq_mask);
  830. }
  831. static void
  832. i8xx_irq_enable(struct intel_engine_cs *engine)
  833. {
  834. struct drm_i915_private *dev_priv = engine->i915;
  835. dev_priv->irq_mask &= ~engine->irq_enable_mask;
  836. I915_WRITE16(IMR, dev_priv->irq_mask);
  837. POSTING_READ16(RING_IMR(engine->mmio_base));
  838. }
  839. static void
  840. i8xx_irq_disable(struct intel_engine_cs *engine)
  841. {
  842. struct drm_i915_private *dev_priv = engine->i915;
  843. dev_priv->irq_mask |= engine->irq_enable_mask;
  844. I915_WRITE16(IMR, dev_priv->irq_mask);
  845. }
  846. static int
  847. bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
  848. {
  849. u32 *cs;
  850. cs = intel_ring_begin(req, 2);
  851. if (IS_ERR(cs))
  852. return PTR_ERR(cs);
  853. *cs++ = MI_FLUSH;
  854. *cs++ = MI_NOOP;
  855. intel_ring_advance(req, cs);
  856. return 0;
  857. }
  858. static void
  859. gen6_irq_enable(struct intel_engine_cs *engine)
  860. {
  861. struct drm_i915_private *dev_priv = engine->i915;
  862. I915_WRITE_IMR(engine,
  863. ~(engine->irq_enable_mask |
  864. engine->irq_keep_mask));
  865. gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
  866. }
  867. static void
  868. gen6_irq_disable(struct intel_engine_cs *engine)
  869. {
  870. struct drm_i915_private *dev_priv = engine->i915;
  871. I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
  872. gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
  873. }
  874. static void
  875. hsw_vebox_irq_enable(struct intel_engine_cs *engine)
  876. {
  877. struct drm_i915_private *dev_priv = engine->i915;
  878. I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
  879. gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
  880. }
  881. static void
  882. hsw_vebox_irq_disable(struct intel_engine_cs *engine)
  883. {
  884. struct drm_i915_private *dev_priv = engine->i915;
  885. I915_WRITE_IMR(engine, ~0);
  886. gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
  887. }
  888. static void
  889. gen8_irq_enable(struct intel_engine_cs *engine)
  890. {
  891. struct drm_i915_private *dev_priv = engine->i915;
  892. I915_WRITE_IMR(engine,
  893. ~(engine->irq_enable_mask |
  894. engine->irq_keep_mask));
  895. POSTING_READ_FW(RING_IMR(engine->mmio_base));
  896. }
  897. static void
  898. gen8_irq_disable(struct intel_engine_cs *engine)
  899. {
  900. struct drm_i915_private *dev_priv = engine->i915;
  901. I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
  902. }
  903. static int
  904. i965_emit_bb_start(struct drm_i915_gem_request *req,
  905. u64 offset, u32 length,
  906. unsigned int dispatch_flags)
  907. {
  908. u32 *cs;
  909. cs = intel_ring_begin(req, 2);
  910. if (IS_ERR(cs))
  911. return PTR_ERR(cs);
  912. *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
  913. I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
  914. *cs++ = offset;
  915. intel_ring_advance(req, cs);
  916. return 0;
  917. }
  918. /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
  919. #define I830_BATCH_LIMIT (256*1024)
  920. #define I830_TLB_ENTRIES (2)
  921. #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
  922. static int
  923. i830_emit_bb_start(struct drm_i915_gem_request *req,
  924. u64 offset, u32 len,
  925. unsigned int dispatch_flags)
  926. {
  927. u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
  928. cs = intel_ring_begin(req, 6);
  929. if (IS_ERR(cs))
  930. return PTR_ERR(cs);
  931. /* Evict the invalid PTE TLBs */
  932. *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
  933. *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
  934. *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
  935. *cs++ = cs_offset;
  936. *cs++ = 0xdeadbeef;
  937. *cs++ = MI_NOOP;
  938. intel_ring_advance(req, cs);
  939. if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
  940. if (len > I830_BATCH_LIMIT)
  941. return -ENOSPC;
  942. cs = intel_ring_begin(req, 6 + 2);
  943. if (IS_ERR(cs))
  944. return PTR_ERR(cs);
  945. /* Blit the batch (which has now all relocs applied) to the
  946. * stable batch scratch bo area (so that the CS never
  947. * stumbles over its tlb invalidation bug) ...
  948. */
  949. *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
  950. *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
  951. *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
  952. *cs++ = cs_offset;
  953. *cs++ = 4096;
  954. *cs++ = offset;
  955. *cs++ = MI_FLUSH;
  956. *cs++ = MI_NOOP;
  957. intel_ring_advance(req, cs);
  958. /* ... and execute it. */
  959. offset = cs_offset;
  960. }
  961. cs = intel_ring_begin(req, 2);
  962. if (IS_ERR(cs))
  963. return PTR_ERR(cs);
  964. *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
  965. *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
  966. MI_BATCH_NON_SECURE);
  967. intel_ring_advance(req, cs);
  968. return 0;
  969. }
  970. static int
  971. i915_emit_bb_start(struct drm_i915_gem_request *req,
  972. u64 offset, u32 len,
  973. unsigned int dispatch_flags)
  974. {
  975. u32 *cs;
  976. cs = intel_ring_begin(req, 2);
  977. if (IS_ERR(cs))
  978. return PTR_ERR(cs);
  979. *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
  980. *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
  981. MI_BATCH_NON_SECURE);
  982. intel_ring_advance(req, cs);
  983. return 0;
  984. }
  985. static void cleanup_phys_status_page(struct intel_engine_cs *engine)
  986. {
  987. struct drm_i915_private *dev_priv = engine->i915;
  988. if (!dev_priv->status_page_dmah)
  989. return;
  990. drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
  991. engine->status_page.page_addr = NULL;
  992. }
  993. static void cleanup_status_page(struct intel_engine_cs *engine)
  994. {
  995. struct i915_vma *vma;
  996. struct drm_i915_gem_object *obj;
  997. vma = fetch_and_zero(&engine->status_page.vma);
  998. if (!vma)
  999. return;
  1000. obj = vma->obj;
  1001. i915_vma_unpin(vma);
  1002. i915_vma_close(vma);
  1003. i915_gem_object_unpin_map(obj);
  1004. __i915_gem_object_release_unless_active(obj);
  1005. }
  1006. static int init_status_page(struct intel_engine_cs *engine)
  1007. {
  1008. struct drm_i915_gem_object *obj;
  1009. struct i915_vma *vma;
  1010. unsigned int flags;
  1011. void *vaddr;
  1012. int ret;
  1013. obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
  1014. if (IS_ERR(obj)) {
  1015. DRM_ERROR("Failed to allocate status page\n");
  1016. return PTR_ERR(obj);
  1017. }
  1018. ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
  1019. if (ret)
  1020. goto err;
  1021. vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
  1022. if (IS_ERR(vma)) {
  1023. ret = PTR_ERR(vma);
  1024. goto err;
  1025. }
  1026. flags = PIN_GLOBAL;
  1027. if (!HAS_LLC(engine->i915))
  1028. /* On g33, we cannot place HWS above 256MiB, so
  1029. * restrict its pinning to the low mappable arena.
  1030. * Though this restriction is not documented for
  1031. * gen4, gen5, or byt, they also behave similarly
  1032. * and hang if the HWS is placed at the top of the
  1033. * GTT. To generalise, it appears that all !llc
  1034. * platforms have issues with us placing the HWS
  1035. * above the mappable region (even though we never
  1036. * actually map it).
  1037. */
  1038. flags |= PIN_MAPPABLE;
  1039. ret = i915_vma_pin(vma, 0, 4096, flags);
  1040. if (ret)
  1041. goto err;
  1042. vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
  1043. if (IS_ERR(vaddr)) {
  1044. ret = PTR_ERR(vaddr);
  1045. goto err_unpin;
  1046. }
  1047. engine->status_page.vma = vma;
  1048. engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
  1049. engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
  1050. DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
  1051. engine->name, i915_ggtt_offset(vma));
  1052. return 0;
  1053. err_unpin:
  1054. i915_vma_unpin(vma);
  1055. err:
  1056. i915_gem_object_put(obj);
  1057. return ret;
  1058. }
  1059. static int init_phys_status_page(struct intel_engine_cs *engine)
  1060. {
  1061. struct drm_i915_private *dev_priv = engine->i915;
  1062. GEM_BUG_ON(engine->id != RCS);
  1063. dev_priv->status_page_dmah =
  1064. drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
  1065. if (!dev_priv->status_page_dmah)
  1066. return -ENOMEM;
  1067. engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
  1068. memset(engine->status_page.page_addr, 0, PAGE_SIZE);
  1069. return 0;
  1070. }
  1071. int intel_ring_pin(struct intel_ring *ring,
  1072. struct drm_i915_private *i915,
  1073. unsigned int offset_bias)
  1074. {
  1075. enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
  1076. struct i915_vma *vma = ring->vma;
  1077. unsigned int flags;
  1078. void *addr;
  1079. int ret;
  1080. GEM_BUG_ON(ring->vaddr);
  1081. flags = PIN_GLOBAL;
  1082. if (offset_bias)
  1083. flags |= PIN_OFFSET_BIAS | offset_bias;
  1084. if (vma->obj->stolen)
  1085. flags |= PIN_MAPPABLE;
  1086. if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
  1087. if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
  1088. ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
  1089. else
  1090. ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
  1091. if (unlikely(ret))
  1092. return ret;
  1093. }
  1094. ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
  1095. if (unlikely(ret))
  1096. return ret;
  1097. if (i915_vma_is_map_and_fenceable(vma))
  1098. addr = (void __force *)i915_vma_pin_iomap(vma);
  1099. else
  1100. addr = i915_gem_object_pin_map(vma->obj, map);
  1101. if (IS_ERR(addr))
  1102. goto err;
  1103. ring->vaddr = addr;
  1104. return 0;
  1105. err:
  1106. i915_vma_unpin(vma);
  1107. return PTR_ERR(addr);
  1108. }
  1109. void intel_ring_reset(struct intel_ring *ring, u32 tail)
  1110. {
  1111. GEM_BUG_ON(!list_empty(&ring->request_list));
  1112. ring->tail = tail;
  1113. ring->head = tail;
  1114. ring->emit = tail;
  1115. intel_ring_update_space(ring);
  1116. }
  1117. void intel_ring_unpin(struct intel_ring *ring)
  1118. {
  1119. GEM_BUG_ON(!ring->vma);
  1120. GEM_BUG_ON(!ring->vaddr);
  1121. /* Discard any unused bytes beyond that submitted to hw. */
  1122. intel_ring_reset(ring, ring->tail);
  1123. if (i915_vma_is_map_and_fenceable(ring->vma))
  1124. i915_vma_unpin_iomap(ring->vma);
  1125. else
  1126. i915_gem_object_unpin_map(ring->vma->obj);
  1127. ring->vaddr = NULL;
  1128. i915_vma_unpin(ring->vma);
  1129. }
  1130. static struct i915_vma *
  1131. intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
  1132. {
  1133. struct drm_i915_gem_object *obj;
  1134. struct i915_vma *vma;
  1135. obj = i915_gem_object_create_stolen(dev_priv, size);
  1136. if (!obj)
  1137. obj = i915_gem_object_create_internal(dev_priv, size);
  1138. if (IS_ERR(obj))
  1139. return ERR_CAST(obj);
  1140. /* mark ring buffers as read-only from GPU side by default */
  1141. obj->gt_ro = 1;
  1142. vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
  1143. if (IS_ERR(vma))
  1144. goto err;
  1145. return vma;
  1146. err:
  1147. i915_gem_object_put(obj);
  1148. return vma;
  1149. }
  1150. struct intel_ring *
  1151. intel_engine_create_ring(struct intel_engine_cs *engine, int size)
  1152. {
  1153. struct intel_ring *ring;
  1154. struct i915_vma *vma;
  1155. GEM_BUG_ON(!is_power_of_2(size));
  1156. GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
  1157. ring = kzalloc(sizeof(*ring), GFP_KERNEL);
  1158. if (!ring)
  1159. return ERR_PTR(-ENOMEM);
  1160. INIT_LIST_HEAD(&ring->request_list);
  1161. ring->size = size;
  1162. /* Workaround an erratum on the i830 which causes a hang if
  1163. * the TAIL pointer points to within the last 2 cachelines
  1164. * of the buffer.
  1165. */
  1166. ring->effective_size = size;
  1167. if (IS_I830(engine->i915) || IS_I845G(engine->i915))
  1168. ring->effective_size -= 2 * CACHELINE_BYTES;
  1169. intel_ring_update_space(ring);
  1170. vma = intel_ring_create_vma(engine->i915, size);
  1171. if (IS_ERR(vma)) {
  1172. kfree(ring);
  1173. return ERR_CAST(vma);
  1174. }
  1175. ring->vma = vma;
  1176. return ring;
  1177. }
  1178. void
  1179. intel_ring_free(struct intel_ring *ring)
  1180. {
  1181. struct drm_i915_gem_object *obj = ring->vma->obj;
  1182. i915_vma_close(ring->vma);
  1183. __i915_gem_object_release_unless_active(obj);
  1184. kfree(ring);
  1185. }
  1186. static int context_pin(struct i915_gem_context *ctx)
  1187. {
  1188. struct i915_vma *vma = ctx->engine[RCS].state;
  1189. int ret;
  1190. /* Clear this page out of any CPU caches for coherent swap-in/out.
  1191. * We only want to do this on the first bind so that we do not stall
  1192. * on an active context (which by nature is already on the GPU).
  1193. */
  1194. if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
  1195. ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
  1196. if (ret)
  1197. return ret;
  1198. }
  1199. return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
  1200. PIN_GLOBAL | PIN_HIGH);
  1201. }
  1202. static struct i915_vma *
  1203. alloc_context_vma(struct intel_engine_cs *engine)
  1204. {
  1205. struct drm_i915_private *i915 = engine->i915;
  1206. struct drm_i915_gem_object *obj;
  1207. struct i915_vma *vma;
  1208. obj = i915_gem_object_create(i915, engine->context_size);
  1209. if (IS_ERR(obj))
  1210. return ERR_CAST(obj);
  1211. /*
  1212. * Try to make the context utilize L3 as well as LLC.
  1213. *
  1214. * On VLV we don't have L3 controls in the PTEs so we
  1215. * shouldn't touch the cache level, especially as that
  1216. * would make the object snooped which might have a
  1217. * negative performance impact.
  1218. *
  1219. * Snooping is required on non-llc platforms in execlist
  1220. * mode, but since all GGTT accesses use PAT entry 0 we
  1221. * get snooping anyway regardless of cache_level.
  1222. *
  1223. * This is only applicable for Ivy Bridge devices since
  1224. * later platforms don't have L3 control bits in the PTE.
  1225. */
  1226. if (IS_IVYBRIDGE(i915)) {
  1227. /* Ignore any error, regard it as a simple optimisation */
  1228. i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
  1229. }
  1230. vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
  1231. if (IS_ERR(vma))
  1232. i915_gem_object_put(obj);
  1233. return vma;
  1234. }
  1235. static int intel_ring_context_pin(struct intel_engine_cs *engine,
  1236. struct i915_gem_context *ctx)
  1237. {
  1238. struct intel_context *ce = &ctx->engine[engine->id];
  1239. int ret;
  1240. lockdep_assert_held(&ctx->i915->drm.struct_mutex);
  1241. if (ce->pin_count++)
  1242. return 0;
  1243. GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
  1244. if (!ce->state && engine->context_size) {
  1245. struct i915_vma *vma;
  1246. vma = alloc_context_vma(engine);
  1247. if (IS_ERR(vma)) {
  1248. ret = PTR_ERR(vma);
  1249. goto error;
  1250. }
  1251. ce->state = vma;
  1252. }
  1253. if (ce->state) {
  1254. ret = context_pin(ctx);
  1255. if (ret)
  1256. goto error;
  1257. ce->state->obj->mm.dirty = true;
  1258. }
  1259. /* The kernel context is only used as a placeholder for flushing the
  1260. * active context. It is never used for submitting user rendering and
  1261. * as such never requires the golden render context, and so we can skip
  1262. * emitting it when we switch to the kernel context. This is required
  1263. * as during eviction we cannot allocate and pin the renderstate in
  1264. * order to initialise the context.
  1265. */
  1266. if (i915_gem_context_is_kernel(ctx))
  1267. ce->initialised = true;
  1268. i915_gem_context_get(ctx);
  1269. return 0;
  1270. error:
  1271. ce->pin_count = 0;
  1272. return ret;
  1273. }
  1274. static void intel_ring_context_unpin(struct intel_engine_cs *engine,
  1275. struct i915_gem_context *ctx)
  1276. {
  1277. struct intel_context *ce = &ctx->engine[engine->id];
  1278. lockdep_assert_held(&ctx->i915->drm.struct_mutex);
  1279. GEM_BUG_ON(ce->pin_count == 0);
  1280. if (--ce->pin_count)
  1281. return;
  1282. if (ce->state)
  1283. i915_vma_unpin(ce->state);
  1284. i915_gem_context_put(ctx);
  1285. }
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int err;

	intel_engine_setup_common(engine);

	err = intel_engine_init_common(engine);
	if (err)
		goto err;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		err = init_phys_status_page(engine);
	else
		err = init_status_page(engine);
	if (err)
		goto err;

	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_hws;
	}

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->buffer);
	engine->buffer = ring;

	return 0;

err_ring:
	intel_ring_free(ring);
err_hws:
	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

void intel_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
		(I915_READ_MODE(engine) & MODE_IDLE) == 0);

	intel_ring_unpin(engine->buffer);
	intel_ring_free(engine->buffer);

	if (engine->cleanup)
		engine->cleanup(engine);

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);

	intel_engine_cleanup_common(engine);

	dev_priv->engine[engine->id] = NULL;
	kfree(engine);
}

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Restart from the beginning of the rings for convenience */
	for_each_engine(engine, dev_priv, id)
		intel_ring_reset(engine->buffer, 0);
}
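
/*
 * Request construction over-reserves ring space up front: reserved_space
 * is bumped by LEGACY_REQUEST_SIZE and a zero-length intel_ring_begin()
 * is issued so that enough space is flushed out before the request body
 * is built, reducing the chance of having to wait (and repeat work)
 * halfway through.
 */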
static int ring_request_alloc(struct drm_i915_gem_request *request)
{
	u32 *cs;

	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	GEM_BUG_ON(!request->engine->buffer);
	request->ring = request->engine->buffer;

	cs = intel_ring_begin(request, 0);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}
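
/*
 * Make at least @bytes of ring space available by waiting for older
 * requests to complete: pick the first request on ring->request_list
 * whose completion would free enough space, wait for it, then retire
 * everything up to and including it so the space is actually reclaimed.
 */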
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_gem_request *target;
	long timeout;

	lockdep_assert_held(&req->i915->drm.struct_mutex);

	intel_ring_update_space(ring);
	if (ring->space >= bytes)
		return 0;

	/*
	 * Space is reserved in the ringbuffer for finalising the request,
	 * as that cannot be allowed to fail. During request finalisation,
	 * reserved_space is set to 0 to stop the overallocation and the
	 * assumption is that then we never need to wait (which has the
	 * risk of failing with EINTR).
	 *
	 * See also i915_gem_request_alloc() and i915_add_request().
	 */
	GEM_BUG_ON(!req->reserved_space);

	list_for_each_entry(target, &ring->request_list, ring_link) {
		unsigned space;

		/* Would completion of this request free enough space? */
		space = __intel_ring_space(target->postfix, ring->emit,
					   ring->size);
		if (space >= bytes)
			break;
	}

	if (WARN_ON(&target->ring_link == &ring->request_list))
		return -ENOSPC;

	timeout = i915_wait_request(target,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_gem_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
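
/*
 * intel_ring_begin() reserves @num_dwords of command space and returns
 * the location to write them; the caller fills in the dwords and closes
 * the packet with intel_ring_advance(). A minimal usage sketch, mirroring
 * the flush and dispatch emitters later in this file:
 *
 *	cs = intel_ring_begin(req, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(req, cs);
 */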
u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
	struct intel_ring *ring = req->ring;
	int remain_actual = ring->size - ring->emit;
	int remain_usable = ring->effective_size - ring->emit;
	int bytes = num_dwords * sizeof(u32);
	int total_bytes, wait_bytes;
	bool need_wrap = false;
	u32 *cs;

	total_bytes = bytes + req->reserved_space;

	if (unlikely(bytes > remain_usable)) {
		/*
		 * Not enough space for the basic request. So need to flush
		 * out the remainder and then wait for base + reserved.
		 */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else if (unlikely(total_bytes > remain_usable)) {
		/*
		 * The base request will fit but the reserved space
		 * falls off the end. So we don't need an immediate wrap
		 * and only need to effectively wait for the reserved
		 * size space from the start of ringbuffer.
		 */
		wait_bytes = remain_actual + req->reserved_space;
	} else {
		/* No wrapping required, just waiting. */
		wait_bytes = total_bytes;
	}

	if (wait_bytes > ring->space) {
		int ret = wait_for_space(req, wait_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		GEM_BUG_ON(remain_actual > ring->space);
		GEM_BUG_ON(ring->emit + remain_actual > ring->size);

		/* Fill the tail with MI_NOOP */
		memset(ring->vaddr + ring->emit, 0, remain_actual);
		ring->emit = 0;
		ring->space -= remain_actual;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes));
	ring->emit += bytes;
	ring->space -= bytes;
	GEM_BUG_ON(ring->space < 0);

	return cs;
}
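
/*
 * Worked example for the padding computed below, assuming the typical
 * CACHELINE_BYTES of 64 (16 dwords): if ring->emit sits 56 bytes into a
 * cacheline, 14 of the 16 dwords are already used, so two MI_NOOPs are
 * emitted to advance the tail to the next cacheline boundary.
 */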
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
	int num_dwords =
		(req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	u32 *cs;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	cs = intel_ring_begin(req, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	while (num_dwords--)
		*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);

	return 0;
}
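
/*
 * Tail updates on the gen6 BSD ring must follow a fixed sequence (see the
 * step-by-step comments below): IDLE messaging is disabled so the GT
 * treats the ring as busy and brings it out of rc6, the tail is written
 * once the ring reports that it is awake, and IDLE messaging is then
 * re-enabled so the ring can sleep again.
 */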
static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
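
/*
 * The MI_FLUSH_DW emitted below is a four-dword packet: the command (with
 * optional TLB invalidation), the post-sync write target
 * (I915_GEM_HWS_SCRATCH_ADDR via the GGTT), and two trailing dwords that
 * hold the upper address and value on gen8+ or zero plus MI_NOOP padding
 * on earlier gens.
 */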
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	if (INTEL_GEN(req->i915) >= 8) {
		*cs++ = 0; /* upper addr */
		*cs++ = 0; /* value */
	} else {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(req, cs);
	return 0;
}

static int
gen8_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	bool ppgtt = USES_PPGTT(req->i915) &&
			!(dispatch_flags & I915_DISPATCH_SECURE);
	u32 *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* FIXME(BDW): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
hsw_emit_bb_start(struct drm_i915_gem_request *req,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
		(dispatch_flags & I915_DISPATCH_RS ?
		MI_BATCH_RESOURCE_STREAMER : 0);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(req, cs);

	return 0;
}

static int
gen6_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(req, cs);

	return 0;
}

/* Blitter support (SandyBridge+) */
static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB;
	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	if (INTEL_GEN(req->i915) >= 8) {
		*cs++ = 0; /* upper addr */
		*cs++ = 0; /* value */
	} else {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(req, cs);

	return 0;
}
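
/*
 * Inter-engine semaphore setup. On gen8+ a single page is pinned high in
 * the GGTT and each pair of engines gets a slot at GEN8_SEMAPHORE_OFFSET();
 * on gen6/7 signalling goes through fixed mailbox registers, so the table
 * below maps each pair of engines to its MI_SEMAPHORE_SYNC encoding and
 * GEN6_*SYNC register. Slots referring to the engine itself stay
 * MI_SEMAPHORE_SYNC_INVALID.
 */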
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	int ret, i;

	if (!i915.semaphores)
		return;

	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
		struct i915_vma *vma;

		obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
		if (IS_ERR(obj))
			goto err;

		vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma))
			goto err_obj;

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			goto err_obj;

		ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
		if (ret)
			goto err_obj;

		dev_priv->semaphore = vma;
	}

	if (INTEL_GEN(dev_priv) >= 8) {
		u32 offset = i915_ggtt_offset(dev_priv->semaphore);

		engine->semaphore.sync_to = gen8_ring_sync_to;
		engine->semaphore.signal = gen8_xcs_signal;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			u32 ring_offset;

			if (i != engine->id)
				ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
			else
				ring_offset = MI_SEMAPHORE_SYNC_INVALID;

			engine->semaphore.signal_ggtt[i] = ring_offset;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->semaphore.sync_to = gen6_ring_sync_to;
		engine->semaphore.signal = gen6_signal;

		/*
		 * The current semaphore is only applied on pre-gen8
		 * platform. And there is no VCS2 ring on the pre-gen8
		 * platform. So the semaphore between RCS and VCS2 is
		 * initialized as INVALID. Gen8 will initialize the
		 * sema between VCS2 and RCS later.
		 */
		for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
			static const struct {
				u32 wait_mbox;
				i915_reg_t mbox_reg;
			} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
				[RCS_HW] = {
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
				},
				[VCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
				},
				[BCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
				},
				[VECS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
				},
			};
			u32 wait_mbox;
			i915_reg_t mbox_reg;

			if (i == engine->hw_id) {
				wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
				mbox_reg = GEN6_NOSYNC;
			} else {
				wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
				mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
			}

			engine->semaphore.mbox.wait[i] = wait_mbox;
			engine->semaphore.mbox.signal[i] = mbox_reg;
		}
	}

	return;

err_obj:
	i915_gem_object_put(obj);
err:
	DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
	i915.semaphores = 0;
}

static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->irq_enable = gen8_irq_enable;
		engine->irq_disable = gen8_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
}

static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset_hw = reset_ring_common;

	engine->context_pin = intel_ring_context_pin;
	engine->context_unpin = intel_ring_context_unpin;

	engine->request_alloc = ring_request_alloc;

	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
	if (i915.semaphores) {
		int num_rings;

		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;

		num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
		if (INTEL_GEN(dev_priv) >= 8) {
			engine->emit_breadcrumb_sz += num_rings * 6;
		} else {
			engine->emit_breadcrumb_sz += num_rings * 3;
			if (num_rings & 1)
				engine->emit_breadcrumb_sz++;
		}
	}

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(dev_priv) >= 8)
		engine->emit_bb_start = gen8_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}
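
/*
 * The per-engine init functions below start from the defaults installed
 * by intel_ring_default_vfuncs() and override the flush, breadcrumb,
 * dispatch and interrupt hooks with generation- and engine-specific
 * variants before creating the ring itself.
 */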
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
		engine->emit_flush = gen8_render_ring_flush;
		if (i915.semaphores) {
			int num_rings;

			engine->semaphore.signal = gen8_rcs_signal;

			num_rings =
				hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
			engine->emit_breadcrumb_sz += num_rings * 8;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;
	engine->cleanup = render_ring_cleanup;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	if (INTEL_GEN(dev_priv) >= 6) {
		ret = intel_engine_create_scratch(engine, PAGE_SIZE);
		if (ret)
			return ret;
	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		if (INTEL_GEN(dev_priv) < 8)
			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->mmio_base = BSD_RING_BASE;
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}

int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	if (INTEL_GEN(dev_priv) < 8)
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

	if (INTEL_GEN(dev_priv) < 8) {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_enable = hsw_vebox_irq_enable;
		engine->irq_disable = hsw_vebox_irq_disable;
	}

	return intel_init_ring_buffer(engine);
}