intel_ringbuffer.c

/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}
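
/*
 * Worked example (illustrative, assuming the usual I915_RING_FREE_SPACE of
 * 64 bytes): with a 4096 byte ring, head == 0x100 and tail == 0xf00 gives
 * space == -0xe00, which wraps to 0x200 (512 bytes); subtracting the
 * reserved 64 bytes leaves 448 bytes available for new commands.
 */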

void intel_ring_update_space(struct intel_ring *ring)
{
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
	}

	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
					 ring->tail, ring->size);
}
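
/*
 * The emitters below all follow the same pattern (sketched here for
 * reference, not a definitive recipe): reserve space with
 * intel_ring_begin(req, n), write exactly n dwords with intel_ring_emit()
 * (padding with MI_NOOP where needed), then publish them with
 * intel_ring_advance(ring).
 */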

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */
	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring,
			PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
		 */
		gen7_render_ring_cs_stall_wa(req);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(req,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(req, flags, scratch_addr);
}

u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_GEN(dev_priv) >= 4)
		/* Gen4+ packs bits 35:32 of the bus address into bits 7:4 */
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 doesn't actually exist on Gen7; the case is only here
		 * to silence the gcc switch check warning.
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, engine->status_page.ggtt_offset);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	if (INTEL_GEN(dev_priv) > 2) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		intel_ring_setup_status_page(engine);

	intel_engine_reset_irq(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));

	intel_ring_update_space(ring);
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->tail);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine,
		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
		       | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
				       RING_VALID, RING_VALID,
				       50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	intel_engine_init_hangcheck(engine);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

static void reset_ring_common(struct intel_engine_cs *engine,
			      struct drm_i915_gem_request *request)
{
	struct intel_ring *ring = request->ring;

	ring->head = request->postfix;
	ring->last_retired_head = -1;
}

static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	struct i915_workarounds *w = &req->i915->workarounds;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, (w->count * 2 + 2));
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_init(req);
	if (ret)
		return ret;

	return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
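
/*
 * Example (illustrative): WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING)
 * records a write of _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING), i.e. the bit
 * in the low 16 bits plus the matching write-enable bit in the high 16 bits,
 * so only that bit is modified when intel_ring_workarounds_emit() replays
 * the list with MI_LOAD_REGISTER_IMM.
 */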

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		 i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaOCLCoherentLineFlush:skl,bxt,kbl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}
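
	/*
	 * Illustrative example: if slice 0 reports subslice_7eu[0] == BIT(2),
	 * then ffs() - 1 gives ss == 2 and vals[0] == 3 - 2 == 1, biasing the
	 * IZ hashing away from the subslice that only has 7 EUs.
	 */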
	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/*
	 * Actual WA is to disable percontext preemption granularity control
	 * until D0 which is the default case so this is equivalent to
	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
	 */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:skl */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaDisableGafsUnitClkGating:skl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
				  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
					   L3_HIGH_PRIO_CREDITS(2));

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	return 0;
}

static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:kbl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		WA_SET_BIT(GAMT_CHKN_BIT_REG,
			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
		/* WaDisableLSQCROPERFforOCL:kbl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:kbl */
	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaDisableLSQCROPERFforOCL:kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WARN_ON(engine->id != RCS);

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;

	if (IS_BROADWELL(dev_priv))
		return bdw_init_workarounds(engine);

	if (IS_CHERRYVIEW(dev_priv))
		return chv_init_workarounds(engine);

	if (IS_SKYLAKE(dev_priv))
		return skl_init_workarounds(engine);

	if (IS_BROXTON(dev_priv))
		return bxt_init_workarounds(engine);

	if (IS_KABYLAKE(dev_priv))
		return kbl_init_workarounds(engine);

	return 0;
}

static int init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret = init_ring_common(engine);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (IS_GEN(dev_priv, 4, 6))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (IS_GEN6(dev_priv))
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev_priv))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev_priv)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (INTEL_INFO(dev_priv)->gen >= 6)
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);

	return init_workarounds_ring(engine);
}

static void render_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	i915_vma_unpin_and_release(&dev_priv->semaphore);
}

static int gen8_rcs_signal(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;
	int ret, num_rings;

	num_rings = INTEL_INFO(dev_priv)->num_rings;
	ret = intel_ring_begin(req, (num_rings-1) * 8);
	if (ret)
		return ret;

	for_each_engine_id(waiter, dev_priv, id) {
		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(ring,
				PIPE_CONTROL_GLOBAL_GTT_IVB |
				PIPE_CONTROL_QW_WRITE |
				PIPE_CONTROL_CS_STALL);
		intel_ring_emit(ring, lower_32_bits(gtt_offset));
		intel_ring_emit(ring, upper_32_bits(gtt_offset));
		intel_ring_emit(ring, req->fence.seqno);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring,
				MI_SEMAPHORE_SIGNAL |
				MI_SEMAPHORE_TARGET(waiter->hw_id));
		intel_ring_emit(ring, 0);
	}
	intel_ring_advance(ring);

	return 0;
}

static int gen8_xcs_signal(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;
	int ret, num_rings;

	num_rings = INTEL_INFO(dev_priv)->num_rings;
	ret = intel_ring_begin(req, (num_rings-1) * 6);
	if (ret)
		return ret;

	for_each_engine_id(waiter, dev_priv, id) {
		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(ring,
				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(ring,
				lower_32_bits(gtt_offset) |
				MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(ring, upper_32_bits(gtt_offset));
		intel_ring_emit(ring, req->fence.seqno);
		intel_ring_emit(ring,
				MI_SEMAPHORE_SIGNAL |
				MI_SEMAPHORE_TARGET(waiter->hw_id));
		intel_ring_emit(ring, 0);
	}
	intel_ring_advance(ring);

	return 0;
}

static int gen6_signal(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *engine;
	int ret, num_rings;

	num_rings = INTEL_INFO(dev_priv)->num_rings;
	ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv) {
		i915_reg_t mbox_reg;

		if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
			continue;

		mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
		if (i915_mmio_reg_valid(mbox_reg)) {
			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit_reg(ring, mbox_reg);
			intel_ring_emit(ring, req->fence.seqno);
		}
	}

	/* If num_dwords was rounded, make sure the tail pointer is correct */
	if (num_rings % 2 == 0)
		intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
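
/*
 * Illustrative dword accounting for the rounding above: with four rings,
 * (4 - 1) * 3 == 9 dwords are needed but 10 are reserved (rounded up to an
 * even count); the three MI_LOAD_REGISTER_IMM triplets consume 9 dwords and
 * the trailing MI_NOOP fills the last reserved dword so the tail matches
 * what intel_ring_begin() accounted for.
 */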

static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	I915_WRITE_TAIL(request->engine,
			intel_ring_offset(request->ring, request->tail));
}

static int i9xx_emit_request(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, req->fence.seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	req->tail = ring->tail;

	return 0;
}

/**
 * gen6_sema_emit_request - Update the semaphore mailbox registers
 *
 * @req: request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
{
	int ret;

	ret = req->engine->semaphore.signal(req);
	if (ret)
		return ret;

	return i9xx_emit_request(req);
}

static int gen8_render_emit_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_ring *ring = req->ring;
	int ret;

	if (engine->semaphore.signal) {
		ret = engine->semaphore.signal(req);
		if (ret)
			return ret;
	}

	ret = intel_ring_begin(req, 8);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
			       PIPE_CONTROL_CS_STALL |
			       PIPE_CONTROL_QW_WRITE));
	intel_ring_emit(ring, intel_hws_seqno_address(engine));
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
	/* We're thrashing one dword of HWS. */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	req->tail = ring->tail;

	return 0;
}
/**
 * gen8_ring_sync_to - sync the waiter to the signaller on seqno
 *
 * @req: request that is waiting
 * @signal: request which has signalled, or will signal
 *
 * Emit a semaphore wait on the waiter's ring so that it does not proceed
 * until the seqno of @signal has been written to the shared semaphore page.
 */
static int
gen8_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_private *dev_priv = req->i915;
	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_WAIT |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(ring, signal->fence.seqno);
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_advance(ring);

	/* When the !RCS engines idle waiting upon a semaphore, they lose their
	 * pagetables and we must reload them before executing the batch.
	 * We do this on the i915_switch_context() following the wait and
	 * before the dispatch.
	 */
	ppgtt = req->ctx->ppgtt;
	if (ppgtt && req->engine->id != RCS)
		ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);

	return 0;
}

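/* Pre-gen8 semaphore wait: stall the waiter's ring on the signaller's
 * mailbox register until it holds a seqno beyond the value emitted below.
 */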
static int
gen6_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
{
	struct intel_ring *ring = req->ring;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
	int ret;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, dw1 | wait_mbox);
	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	intel_ring_emit(ring, signal->fence.seqno - 1);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void
gen5_seqno_barrier(struct intel_engine_cs *engine)
{
	/* MI_STORE are internally buffered by the GPU and not flushed
	 * either by MI_FLUSH or SyncFlush or any other combination of
	 * MI commands.
	 *
	 * "Only the submission of the store operation is guaranteed.
	 * The write result will be complete (coherent) some time later
	 * (this is practically a finite period but there is no guaranteed
	 * latency)."
	 *
	 * Empirically, we observe that we need a delay of at least 75us to
	 * be sure that the seqno write is visible by the CPU.
	 */
	usleep_range(125, 250);
}

static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}

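/* Simple MI_FLUSH for the pre-gen6 BSD (video) ring. */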
static int
bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
	gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~0);
	gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen8_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
gen8_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}

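/* Dispatch a batch buffer on gen4/gen5 via MI_BATCH_BUFFER_START from the
 * global GTT, honouring the secure-dispatch flag.
 */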
static int
i965_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)

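/* i830/845 workaround for the broken CS TLB: first run a small colour blit
 * over the scratch pages to evict stale PTE TLB entries, then (unless the
 * batch is already pinned in a safe location) blit the batch into the
 * scratch bo and execute it from there.
 */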
static int
i830_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Evict the invalid PTE TLBs */
	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
	intel_ring_emit(ring, cs_offset);
	intel_ring_emit(ring, 0xdeadbeef);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(req, 6 + 2);
		if (ret)
			return ret;

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(ring,
				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);

		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static int
i915_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	i915_vma_unpin(vma);
	i915_gem_object_unpin_map(vma->obj);
	i915_vma_put(vma);
}

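/* Allocate a page for the hardware status page (HWS), pin it into the
 * global GTT and keep a CPU mapping so seqnos written by the GPU can be
 * read back cheaply.
 */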
static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	int ret;

	obj = i915_gem_object_create(&engine->i915->drm, 4096);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr =
		i915_gem_object_pin_map(obj, I915_MAP_WB);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

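/* Pin the ringbuffer into the global GTT and map it for CPU writes:
 * through the GTT aperture (write-combined) when it is map-and-fenceable,
 * otherwise directly via the object's own pages.
 */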
int intel_ring_pin(struct intel_ring *ring)
{
	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
	enum i915_map_type map;
	struct i915_vma *vma = ring->vma;
	void *addr;
	int ret;

	GEM_BUG_ON(ring->vaddr);

	map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;

	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;

	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		else
			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
		if (unlikely(ret))
			return ret;
	}

	ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
	if (unlikely(ret))
		return ret;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj, map);
	if (IS_ERR(addr))
		goto err;

	ring->vaddr = addr;
	return 0;

err:
	i915_vma_unpin(vma);
	return PTR_ERR(addr);
}

void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

	if (i915_vma_is_map_and_fenceable(ring->vma))
		i915_vma_unpin_iomap(ring->vma);
	else
		i915_gem_object_unpin_map(ring->vma->obj);
	ring->vaddr = NULL;

	i915_vma_unpin(ring->vma);
}

static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
	if (!obj)
		obj = i915_gem_object_create(&dev_priv->drm, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

	vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	ring->engine = engine;

	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->i915) || IS_845G(engine->i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	ring->last_retired_head = -1;
	intel_ring_update_space(ring);

	vma = intel_ring_create_vma(engine->i915, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

void
intel_ring_free(struct intel_ring *ring)
{
	i915_vma_put(ring->vma);
	kfree(ring);
}

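/* Pin the legacy (non-execlists) context image into the global GTT for use
 * by this engine, refcounted per engine via ce->pin_count.
 */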
static int intel_ring_context_pin(struct i915_gem_context *ctx,
				  struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	int ret;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (ce->pin_count++)
		return 0;

	if (ce->state) {
		ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
		if (ret)
			goto error;

		ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
				   PIN_GLOBAL | PIN_HIGH);
		if (ret)
			goto error;
	}

	/* The kernel context is only used as a placeholder for flushing the
	 * active context. It is never used for submitting user rendering and
	 * as such never requires the golden render context, and so we can skip
	 * emitting it when we switch to the kernel context. This is required
	 * as during eviction we cannot allocate and pin the renderstate in
	 * order to initialise the context.
	 */
	if (ctx == ctx->i915->kernel_context)
		ce->initialised = true;

	i915_gem_context_get(ctx);
	return 0;

error:
	ce->pin_count = 0;
	return ret;
}

static void intel_ring_context_unpin(struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (--ce->pin_count)
		return;

	if (ce->state)
		i915_vma_unpin(ce->state);

	i915_gem_context_put(ctx);
}

static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring;
	int ret;

	WARN_ON(engine->buffer);

	intel_engine_setup_common(engine);

	memset(engine->semaphore.sync_seqno, 0,
	       sizeof(engine->semaphore.sync_seqno));

	ret = intel_engine_init_common(engine);
	if (ret)
		goto error;

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
	if (ret)
		goto error;

	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		goto error;
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv)) {
		WARN_ON(engine->id != RCS);
		ret = init_phys_status_page(engine);
		if (ret)
			goto error;
	} else {
		ret = init_status_page(engine);
		if (ret)
			goto error;
	}

	ret = intel_ring_pin(ring);
	if (ret) {
		intel_ring_free(ring);
		goto error;
	}
	engine->buffer = ring;

	return 0;

error:
	intel_engine_cleanup(engine);
	return ret;
}

void intel_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv;

	if (!intel_engine_initialized(engine))
		return;

	dev_priv = engine->i915;

	if (engine->buffer) {
		WARN_ON(INTEL_GEN(dev_priv) > 2 &&
			(I915_READ_MODE(engine) & MODE_IDLE) == 0);

		intel_ring_unpin(engine->buffer);
		intel_ring_free(engine->buffer);
		engine->buffer = NULL;
	}

	if (engine->cleanup)
		engine->cleanup(engine);

	if (HWS_NEEDS_PHYSICAL(dev_priv)) {
		WARN_ON(engine->id != RCS);
		cleanup_phys_status_page(engine);
	} else {
		cleanup_status_page(engine);
	}

	intel_engine_cleanup_common(engine);

	intel_ring_context_unpin(dev_priv->kernel_context, engine);

	engine->i915 = NULL;
}

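/* After resume the rings are idle and empty; reset our ringbuffer
 * bookkeeping so that head == tail for every engine.
 */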
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		engine->buffer->head = engine->buffer->tail;
		engine->buffer->last_retired_head = -1;
	}
}

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	int ret;

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	request->ring = request->engine->buffer;

	ret = intel_ring_begin(request, 0);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

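/* Not enough free space in the ring: find the oldest outstanding request
 * whose retirement would free at least @bytes and wait for it to complete.
 */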
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_gem_request *target;
	int ret;

	intel_ring_update_space(ring);
	if (ring->space >= bytes)
		return 0;

	/*
	 * Space is reserved in the ringbuffer for finalising the request,
	 * as that cannot be allowed to fail. During request finalisation,
	 * reserved_space is set to 0 to stop the overallocation and the
	 * assumption is that then we never need to wait (which has the
	 * risk of failing with EINTR).
	 *
	 * See also i915_gem_request_alloc() and i915_add_request().
	 */
	GEM_BUG_ON(!req->reserved_space);

	list_for_each_entry(target, &ring->request_list, ring_link) {
		unsigned space;

		/* Would completion of this request free enough space? */
		space = __intel_ring_space(target->postfix, ring->tail,
					   ring->size);
		if (space >= bytes)
			break;
	}

	if (WARN_ON(&target->ring_link == &ring->request_list))
		return -ENOSPC;

	ret = i915_wait_request(target,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				NULL, NO_WAITBOOST);
	if (ret)
		return ret;

	i915_gem_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

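/* Reserve space in the ring for @num_dwords of commands, waiting for old
 * requests to retire and wrapping past the end of the buffer (padding the
 * tail with MI_NOOP) as necessary.
 */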
int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
	struct intel_ring *ring = req->ring;
	int remain_actual = ring->size - ring->tail;
	int remain_usable = ring->effective_size - ring->tail;
	int bytes = num_dwords * sizeof(u32);
	int total_bytes, wait_bytes;
	bool need_wrap = false;

	total_bytes = bytes + req->reserved_space;

	if (unlikely(bytes > remain_usable)) {
		/*
		 * Not enough space for the basic request. So need to flush
		 * out the remainder and then wait for base + reserved.
		 */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else if (unlikely(total_bytes > remain_usable)) {
		/*
		 * The base request will fit but the reserved space
		 * falls off the end. So we don't need an immediate wrap
		 * and only need to effectively wait for the reserved
		 * size space from the start of ringbuffer.
		 */
		wait_bytes = remain_actual + req->reserved_space;
	} else {
		/* No wrapping required, just waiting. */
		wait_bytes = total_bytes;
	}

	if (wait_bytes > ring->space) {
		int ret = wait_for_space(req, wait_bytes);

		if (unlikely(ret))
			return ret;
	}

	if (unlikely(need_wrap)) {
		GEM_BUG_ON(remain_actual > ring->space);
		GEM_BUG_ON(ring->tail + remain_actual > ring->size);

		/* Fill the tail with MI_NOOP */
		memset(ring->vaddr + ring->tail, 0, remain_actual);
		ring->tail = 0;
		ring->space -= remain_actual;
	}

	ring->space -= bytes;
	GEM_BUG_ON(ring->space < 0);
	return 0;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int num_dwords =
		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	ret = intel_ring_begin(req, num_dwords);
	if (ret)
		return ret;

	while (num_dwords--)
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return 0;
}

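/* Gen6 BSD tail-update workaround: wake the ring out of its power-saving
 * idle state before writing TAIL, then allow it to sleep again.
 */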
static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_BSD_SLEEP_PSMI_CONTROL,
				       GEN6_BSD_SLEEP_INDICATOR,
				       0,
				       50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_GEN(req->i915) >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}

static int
gen8_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	bool ppgtt = USES_PPGTT(req->i915) &&
			!(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
hsw_emit_bb_start(struct drm_i915_gem_request *req,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */
static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring,
			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_GEN(req->i915) >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	return 0;
}

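/* Set up inter-engine semaphores: on gen8+ each engine signals into slots
 * of a shared GGTT page, on gen6/7 via per-pair mailbox registers.
 */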
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	int ret, i;

	if (!i915.semaphores)
		return;

	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
		struct i915_vma *vma;

		obj = i915_gem_object_create(&dev_priv->drm, 4096);
		if (IS_ERR(obj))
			goto err;

		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma))
			goto err_obj;

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			goto err_obj;

		ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
		if (ret)
			goto err_obj;

		dev_priv->semaphore = vma;
	}

	if (INTEL_GEN(dev_priv) >= 8) {
		u32 offset = i915_ggtt_offset(dev_priv->semaphore);

		engine->semaphore.sync_to = gen8_ring_sync_to;
		engine->semaphore.signal = gen8_xcs_signal;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			u32 ring_offset;

			if (i != engine->id)
				ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
			else
				ring_offset = MI_SEMAPHORE_SYNC_INVALID;

			engine->semaphore.signal_ggtt[i] = ring_offset;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->semaphore.sync_to = gen6_ring_sync_to;
		engine->semaphore.signal = gen6_signal;

		/*
		 * The current semaphore is only applied on pre-gen8
		 * platform. And there is no VCS2 ring on the pre-gen8
		 * platform. So the semaphore between RCS and VCS2 is
		 * initialized as INVALID. Gen8 will initialize the
		 * sema between VCS2 and RCS later.
		 */
		for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
			static const struct {
				u32 wait_mbox;
				i915_reg_t mbox_reg;
			} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
				[RCS_HW] = {
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
				},
				[VCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
				},
				[BCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
				},
				[VECS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
				},
			};
			u32 wait_mbox;
			i915_reg_t mbox_reg;

			if (i == engine->hw_id) {
				wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
				mbox_reg = GEN6_NOSYNC;
			} else {
				wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
				mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
			}

			engine->semaphore.mbox.wait[i] = wait_mbox;
			engine->semaphore.mbox.signal[i] = mbox_reg;
		}
	}

	return;

err_obj:
	i915_gem_object_put(obj);
err:
	DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
	i915.semaphores = 0;
}

static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->irq_enable = gen8_irq_enable;
		engine->irq_disable = gen8_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

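/* Install the default legacy ringbuffer vfuncs; the per-engine init
 * functions below then override engine-specific hooks such as emit_flush.
 */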
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset_hw = reset_ring_common;

	engine->emit_request = i9xx_emit_request;
	if (i915.semaphores)
		engine->emit_request = gen6_sema_emit_request;
	engine->submit_request = i9xx_submit_request;

	if (INTEL_GEN(dev_priv) >= 8)
		engine->emit_bb_start = gen8_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_request = gen8_render_emit_request;
		engine->emit_flush = gen8_render_ring_flush;
		if (i915.semaphores)
			engine->semaphore.signal = gen8_rcs_signal;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;
	engine->cleanup = render_ring_cleanup;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	if (INTEL_GEN(dev_priv) >= 6) {
		ret = intel_engine_create_scratch(engine, 4096);
		if (ret)
			return ret;
	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->submit_request = gen6_bsd_submit_request;
		engine->emit_flush = gen6_bsd_ring_flush;
		if (INTEL_GEN(dev_priv) < 8)
			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->mmio_base = BSD_RING_BASE;
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}

/**
 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_bsd_ring_flush;

	return intel_init_ring_buffer(engine);
}

int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	if (INTEL_GEN(dev_priv) < 8)
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

	if (INTEL_GEN(dev_priv) < 8) {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_enable = hsw_vebox_irq_enable;
		engine->irq_disable = hsw_vebox_irq_disable;
	}

	return intel_init_ring_buffer(engine);
}