intel_ringbuffer.c 77 KB

  1. /*
  2. * Copyright © 2008-2010 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eric Anholt <eric@anholt.net>
  25. * Zou Nan hai <nanhai.zou@intel.com>
  26. * Xiang Hai hao <haihao.xiang@intel.com>
  27. *
  28. */
  29. #include <linux/log2.h>
  30. #include <drm/drmP.h>
  31. #include "i915_drv.h"
  32. #include <drm/i915_drm.h>
  33. #include "i915_trace.h"
  34. #include "intel_drv.h"
  35. /* Rough estimate of the typical request size, performing a flush,
  36. * set-context and then emitting the batch.
  37. */
  38. #define LEGACY_REQUEST_SIZE 200
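/*
 * Note on the helper below: the ring buffer is circular, so the free space
 * between the hardware read pointer (head) and the software write pointer
 * (tail) is head - tail, with one full ring size added back whenever the
 * tail sits at or ahead of the head. I915_RING_FREE_SPACE is then reserved
 * so the tail can never fully catch up with the head. For example, with
 * size = 4096, head = 512 and tail = 3584: 512 - 3584 = -3072, + 4096 =
 * 1024 bytes, minus the reserve.
 */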
  39. int __intel_ring_space(int head, int tail, int size)
  40. {
  41. int space = head - tail;
  42. if (space <= 0)
  43. space += size;
  44. return space - I915_RING_FREE_SPACE;
  45. }
  46. void intel_ring_update_space(struct intel_ring *ring)
  47. {
  48. if (ring->last_retired_head != -1) {
  49. ring->head = ring->last_retired_head;
  50. ring->last_retired_head = -1;
  51. }
  52. ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
  53. ring->tail, ring->size);
  54. }
  55. static int
  56. gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
  57. {
  58. struct intel_ring *ring = req->ring;
  59. u32 cmd;
  60. int ret;
  61. cmd = MI_FLUSH;
  62. if (mode & EMIT_INVALIDATE)
  63. cmd |= MI_READ_FLUSH;
  64. ret = intel_ring_begin(req, 2);
  65. if (ret)
  66. return ret;
  67. intel_ring_emit(ring, cmd);
  68. intel_ring_emit(ring, MI_NOOP);
  69. intel_ring_advance(ring);
  70. return 0;
  71. }
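/*
 * The flush above follows the pattern used by every legacy command emitter
 * in this file: reserve dwords with intel_ring_begin(), write them with
 * intel_ring_emit(), then publish with intel_ring_advance(). A minimal
 * sketch of such an emitter, writing two no-ops (illustrative only, not
 * part of the driver):
 *
 *	static int emit_two_noops(struct drm_i915_gem_request *req)
 *	{
 *		struct intel_ring *ring = req->ring;
 *		int ret;
 *
 *		ret = intel_ring_begin(req, 2);
 *		if (ret)
 *			return ret;
 *		intel_ring_emit(ring, MI_NOOP);
 *		intel_ring_emit(ring, MI_NOOP);
 *		intel_ring_advance(ring);
 *		return 0;
 *	}
 */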
  72. static int
  73. gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
  74. {
  75. struct intel_ring *ring = req->ring;
  76. u32 cmd;
  77. int ret;
  78. /*
  79. * read/write caches:
  80. *
  81. * I915_GEM_DOMAIN_RENDER is always invalidated, but is
  82. * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
  83. * also flushed at 2d versus 3d pipeline switches.
  84. *
  85. * read-only caches:
  86. *
  87. * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
  88. * MI_READ_FLUSH is set, and is always flushed on 965.
  89. *
  90. * I915_GEM_DOMAIN_COMMAND may not exist?
  91. *
  92. * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
  93. * invalidated when MI_EXE_FLUSH is set.
  94. *
  95. * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
  96. * invalidated with every MI_FLUSH.
  97. *
  98. * TLBs:
  99. *
  100. * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
  101. * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
  102. * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
  103. * are flushed at any MI_FLUSH.
  104. */
  105. cmd = MI_FLUSH;
  106. if (mode & EMIT_INVALIDATE) {
  107. cmd |= MI_EXE_FLUSH;
  108. if (IS_G4X(req->i915) || IS_GEN5(req->i915))
  109. cmd |= MI_INVALIDATE_ISP;
  110. }
  111. ret = intel_ring_begin(req, 2);
  112. if (ret)
  113. return ret;
  114. intel_ring_emit(ring, cmd);
  115. intel_ring_emit(ring, MI_NOOP);
  116. intel_ring_advance(ring);
  117. return 0;
  118. }
  119. /**
  120. * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
  121. * implementing two workarounds on gen6. From section 1.4.7.1
  122. * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
  123. *
  124. * [DevSNB-C+{W/A}] Before any depth stall flush (including those
  125. * produced by non-pipelined state commands), software needs to first
  126. * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
  127. * 0.
  128. *
  129. * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
  130. * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
  131. *
  132. * And the workaround for these two requires this workaround first:
  133. *
  134. * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
  135. * BEFORE the pipe-control with a post-sync op and no write-cache
  136. * flushes.
  137. *
  138. * And this last workaround is tricky because of the requirements on
  139. * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
  140. * volume 2 part 1:
  141. *
  142. * "1 of the following must also be set:
  143. * - Render Target Cache Flush Enable ([12] of DW1)
  144. * - Depth Cache Flush Enable ([0] of DW1)
  145. * - Stall at Pixel Scoreboard ([1] of DW1)
  146. * - Depth Stall ([13] of DW1)
  147. * - Post-Sync Operation ([13] of DW1)
  148. * - Notify Enable ([8] of DW1)"
  149. *
  150. * The cache flushes require the workaround flush that triggered this
  151. * one, so we can't use it. Depth stall would trigger the same.
  152. * Post-sync nonzero is what triggered this second workaround, so we
  153. * can't use that one either. Notify enable is IRQs, which aren't
  154. * really our business. That leaves only stall at scoreboard.
  155. */
  156. static int
  157. intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
  158. {
  159. struct intel_ring *ring = req->ring;
  160. u32 scratch_addr =
  161. req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
  162. int ret;
  163. ret = intel_ring_begin(req, 6);
  164. if (ret)
  165. return ret;
  166. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
  167. intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
  168. PIPE_CONTROL_STALL_AT_SCOREBOARD);
  169. intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
  170. intel_ring_emit(ring, 0); /* low dword */
  171. intel_ring_emit(ring, 0); /* high dword */
  172. intel_ring_emit(ring, MI_NOOP);
  173. intel_ring_advance(ring);
  174. ret = intel_ring_begin(req, 6);
  175. if (ret)
  176. return ret;
  177. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
  178. intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
  179. intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
  180. intel_ring_emit(ring, 0);
  181. intel_ring_emit(ring, 0);
  182. intel_ring_emit(ring, MI_NOOP);
  183. intel_ring_advance(ring);
  184. return 0;
  185. }
  186. static int
  187. gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
  188. {
  189. struct intel_ring *ring = req->ring;
  190. u32 scratch_addr =
  191. req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
  192. u32 flags = 0;
  193. int ret;
  194. /* Force SNB workarounds for PIPE_CONTROL flushes */
  195. ret = intel_emit_post_sync_nonzero_flush(req);
  196. if (ret)
  197. return ret;
  198. /* Just flush everything. Experiments have shown that reducing the
  199. * number of bits based on the write domains has little performance
  200. * impact.
  201. */
  202. if (mode & EMIT_FLUSH) {
  203. flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
  204. flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
  205. /*
  206. * Ensure that any following seqno writes only happen
  207. * when the render cache is indeed flushed.
  208. */
  209. flags |= PIPE_CONTROL_CS_STALL;
  210. }
  211. if (mode & EMIT_INVALIDATE) {
  212. flags |= PIPE_CONTROL_TLB_INVALIDATE;
  213. flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
  214. flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
  215. flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
  216. flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
  217. flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
  218. /*
  219. * TLB invalidate requires a post-sync write.
  220. */
  221. flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
  222. }
  223. ret = intel_ring_begin(req, 4);
  224. if (ret)
  225. return ret;
  226. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
  227. intel_ring_emit(ring, flags);
  228. intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
  229. intel_ring_emit(ring, 0);
  230. intel_ring_advance(ring);
  231. return 0;
  232. }
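/*
 * A note on the PIPE_CONTROL packets in this file: the argument to
 * GFX_OP_PIPE_CONTROL() is the overall packet length in dwords. The header
 * dword is followed by the flags dword, the post-sync write address (the
 * engine's scratch page here) and the data dword(s); the longer 5- and
 * 6-dword variants used above and below simply carry a larger post-sync
 * address/value payload.
 */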
  233. static int
  234. gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
  235. {
  236. struct intel_ring *ring = req->ring;
  237. int ret;
  238. ret = intel_ring_begin(req, 4);
  239. if (ret)
  240. return ret;
  241. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
  242. intel_ring_emit(ring,
  243. PIPE_CONTROL_CS_STALL |
  244. PIPE_CONTROL_STALL_AT_SCOREBOARD);
  245. intel_ring_emit(ring, 0);
  246. intel_ring_emit(ring, 0);
  247. intel_ring_advance(ring);
  248. return 0;
  249. }
  250. static int
  251. gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
  252. {
  253. struct intel_ring *ring = req->ring;
  254. u32 scratch_addr =
  255. req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
  256. u32 flags = 0;
  257. int ret;
  258. /*
  259. * Ensure that any following seqno writes only happen when the render
  260. * cache is indeed flushed.
  261. *
  262. * Workaround: 4th PIPE_CONTROL command (except the ones with only
  263. * read-cache invalidate bits set) must have the CS_STALL bit set. We
  264. * don't try to be clever and just set it unconditionally.
  265. */
  266. flags |= PIPE_CONTROL_CS_STALL;
  267. /* Just flush everything. Experiments have shown that reducing the
  268. * number of bits based on the write domains has little performance
  269. * impact.
  270. */
  271. if (mode & EMIT_FLUSH) {
  272. flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
  273. flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
  274. flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
  275. flags |= PIPE_CONTROL_FLUSH_ENABLE;
  276. }
  277. if (mode & EMIT_INVALIDATE) {
  278. flags |= PIPE_CONTROL_TLB_INVALIDATE;
  279. flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
  280. flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
  281. flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
  282. flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
  283. flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
  284. flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
  285. /*
  286. * TLB invalidate requires a post-sync write.
  287. */
  288. flags |= PIPE_CONTROL_QW_WRITE;
  289. flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
  290. flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
  291. /* Workaround: we must issue a pipe_control with CS-stall bit
  292. * set before a pipe_control command that has the state cache
  293. * invalidate bit set. */
  294. gen7_render_ring_cs_stall_wa(req);
  295. }
  296. ret = intel_ring_begin(req, 4);
  297. if (ret)
  298. return ret;
  299. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
  300. intel_ring_emit(ring, flags);
  301. intel_ring_emit(ring, scratch_addr);
  302. intel_ring_emit(ring, 0);
  303. intel_ring_advance(ring);
  304. return 0;
  305. }
  306. static int
  307. gen8_emit_pipe_control(struct drm_i915_gem_request *req,
  308. u32 flags, u32 scratch_addr)
  309. {
  310. struct intel_ring *ring = req->ring;
  311. int ret;
  312. ret = intel_ring_begin(req, 6);
  313. if (ret)
  314. return ret;
  315. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
  316. intel_ring_emit(ring, flags);
  317. intel_ring_emit(ring, scratch_addr);
  318. intel_ring_emit(ring, 0);
  319. intel_ring_emit(ring, 0);
  320. intel_ring_emit(ring, 0);
  321. intel_ring_advance(ring);
  322. return 0;
  323. }
  324. static int
  325. gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
  326. {
  327. u32 scratch_addr =
  328. req->engine->scratch->node.start + 2 * CACHELINE_BYTES;
  329. u32 flags = 0;
  330. int ret;
  331. flags |= PIPE_CONTROL_CS_STALL;
  332. if (mode & EMIT_FLUSH) {
  333. flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
  334. flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
  335. flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
  336. flags |= PIPE_CONTROL_FLUSH_ENABLE;
  337. }
  338. if (mode & EMIT_INVALIDATE) {
  339. flags |= PIPE_CONTROL_TLB_INVALIDATE;
  340. flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
  341. flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
  342. flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
  343. flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
  344. flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
  345. flags |= PIPE_CONTROL_QW_WRITE;
  346. flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
  347. /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
  348. ret = gen8_emit_pipe_control(req,
  349. PIPE_CONTROL_CS_STALL |
  350. PIPE_CONTROL_STALL_AT_SCOREBOARD,
  351. 0);
  352. if (ret)
  353. return ret;
  354. }
  355. return gen8_emit_pipe_control(req, flags, scratch_addr);
  356. }
  357. u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
  358. {
  359. struct drm_i915_private *dev_priv = engine->i915;
  360. u64 acthd;
  361. if (INTEL_GEN(dev_priv) >= 8)
  362. acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
  363. RING_ACTHD_UDW(engine->mmio_base));
  364. else if (INTEL_GEN(dev_priv) >= 4)
  365. acthd = I915_READ(RING_ACTHD(engine->mmio_base));
  366. else
  367. acthd = I915_READ(ACTHD);
  368. return acthd;
  369. }
  370. static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
  371. {
  372. struct drm_i915_private *dev_priv = engine->i915;
  373. u32 addr;
  374. addr = dev_priv->status_page_dmah->busaddr;
  375. if (INTEL_GEN(dev_priv) >= 4)
  376. addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
  377. I915_WRITE(HWS_PGA, addr);
  378. }
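/*
 * Note on ring_setup_phys_status_page(): HWS_PGA takes a page-aligned
 * physical address. The (busaddr >> 28) & 0xf0 term folds bits [35:32] of
 * the bus address into bits [7:4] of the register value, presumably so
 * that gen4+ parts can place the status page above 4GiB.
 */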
  379. static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
  380. {
  381. struct drm_i915_private *dev_priv = engine->i915;
  382. i915_reg_t mmio;
  383. /* The ring status page addresses are no longer next to the rest of
  384. * the ring registers as of gen7.
  385. */
  386. if (IS_GEN7(dev_priv)) {
  387. switch (engine->id) {
  388. case RCS:
  389. mmio = RENDER_HWS_PGA_GEN7;
  390. break;
  391. case BCS:
  392. mmio = BLT_HWS_PGA_GEN7;
  393. break;
  394. /*
  395. * VCS2 doesn't actually exist on Gen7; it is listed here only
  396. * to silence the gcc switch check warning.
  397. */
  398. case VCS2:
  399. case VCS:
  400. mmio = BSD_HWS_PGA_GEN7;
  401. break;
  402. case VECS:
  403. mmio = VEBOX_HWS_PGA_GEN7;
  404. break;
  405. }
  406. } else if (IS_GEN6(dev_priv)) {
  407. mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
  408. } else {
  409. /* XXX: gen8 returns to sanity */
  410. mmio = RING_HWS_PGA(engine->mmio_base);
  411. }
  412. I915_WRITE(mmio, engine->status_page.ggtt_offset);
  413. POSTING_READ(mmio);
  414. /*
  415. * Flush the TLB for this page
  416. *
  417. * FIXME: These two bits have disappeared on gen8, so a question
  418. * arises: do we still need this and if so how should we go about
  419. * invalidating the TLB?
  420. */
  421. if (IS_GEN(dev_priv, 6, 7)) {
  422. i915_reg_t reg = RING_INSTPM(engine->mmio_base);
  423. /* ring should be idle before issuing a sync flush */
  424. WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
  425. I915_WRITE(reg,
  426. _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
  427. INSTPM_SYNC_FLUSH));
  428. if (intel_wait_for_register(dev_priv,
  429. reg, INSTPM_SYNC_FLUSH, 0,
  430. 1000))
  431. DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
  432. engine->name);
  433. }
  434. }
  435. static bool stop_ring(struct intel_engine_cs *engine)
  436. {
  437. struct drm_i915_private *dev_priv = engine->i915;
  438. if (!IS_GEN2(dev_priv)) {
  439. I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
  440. if (intel_wait_for_register(dev_priv,
  441. RING_MI_MODE(engine->mmio_base),
  442. MODE_IDLE,
  443. MODE_IDLE,
  444. 1000)) {
  445. DRM_ERROR("%s : timed out trying to stop ring\n",
  446. engine->name);
  447. /* Sometimes we observe that the idle flag is not
  448. * set even though the ring is empty. So double
  449. * check before giving up.
  450. */
  451. if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
  452. return false;
  453. }
  454. }
  455. I915_WRITE_CTL(engine, 0);
  456. I915_WRITE_HEAD(engine, 0);
  457. I915_WRITE_TAIL(engine, 0);
  458. if (!IS_GEN2(dev_priv)) {
  459. (void)I915_READ_CTL(engine);
  460. I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
  461. }
  462. return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
  463. }
  464. static int init_ring_common(struct intel_engine_cs *engine)
  465. {
  466. struct drm_i915_private *dev_priv = engine->i915;
  467. struct intel_ring *ring = engine->buffer;
  468. int ret = 0;
  469. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  470. if (!stop_ring(engine)) {
  471. /* G45 ring initialization often fails to reset head to zero */
  472. DRM_DEBUG_KMS("%s head not reset to zero "
  473. "ctl %08x head %08x tail %08x start %08x\n",
  474. engine->name,
  475. I915_READ_CTL(engine),
  476. I915_READ_HEAD(engine),
  477. I915_READ_TAIL(engine),
  478. I915_READ_START(engine));
  479. if (!stop_ring(engine)) {
  480. DRM_ERROR("failed to set %s head to zero "
  481. "ctl %08x head %08x tail %08x start %08x\n",
  482. engine->name,
  483. I915_READ_CTL(engine),
  484. I915_READ_HEAD(engine),
  485. I915_READ_TAIL(engine),
  486. I915_READ_START(engine));
  487. ret = -EIO;
  488. goto out;
  489. }
  490. }
  491. if (I915_NEED_GFX_HWS(dev_priv))
  492. intel_ring_setup_status_page(engine);
  493. else
  494. ring_setup_phys_status_page(engine);
  495. /* Enforce ordering by reading HEAD register back */
  496. I915_READ_HEAD(engine);
  497. /* Initialize the ring. This must happen _after_ we've cleared the ring
  498. * registers with the above sequence (the readback of the HEAD registers
  499. * also enforces ordering), otherwise the hw might lose the new ring
  500. * register values. */
  501. I915_WRITE_START(engine, ring->vma->node.start);
  502. /* WaClearRingBufHeadRegAtInit:ctg,elk */
  503. if (I915_READ_HEAD(engine))
  504. DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
  505. engine->name, I915_READ_HEAD(engine));
  506. I915_WRITE_HEAD(engine, 0);
  507. (void)I915_READ_HEAD(engine);
  508. I915_WRITE_CTL(engine,
  509. ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
  510. | RING_VALID);
  511. /* If the head is still not zero, the ring is dead */
  512. if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
  513. I915_READ_START(engine) == ring->vma->node.start &&
  514. (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
  515. DRM_ERROR("%s initialization failed "
  516. "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08llx]\n",
  517. engine->name,
  518. I915_READ_CTL(engine),
  519. I915_READ_CTL(engine) & RING_VALID,
  520. I915_READ_HEAD(engine), I915_READ_TAIL(engine),
  521. I915_READ_START(engine),
  522. ring->vma->node.start);
  523. ret = -EIO;
  524. goto out;
  525. }
  526. ring->last_retired_head = -1;
  527. ring->head = I915_READ_HEAD(engine);
  528. ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
  529. intel_ring_update_space(ring);
  530. intel_engine_init_hangcheck(engine);
  531. out:
  532. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  533. return ret;
  534. }
  535. void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
  536. {
  537. struct i915_vma *vma;
  538. vma = fetch_and_zero(&engine->scratch);
  539. if (!vma)
  540. return;
  541. i915_vma_unpin(vma);
  542. i915_vma_put(vma);
  543. }
  544. int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
  545. {
  546. struct drm_i915_gem_object *obj;
  547. struct i915_vma *vma;
  548. int ret;
  549. WARN_ON(engine->scratch);
  550. obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
  551. if (!obj)
  552. obj = i915_gem_object_create(&engine->i915->drm, size);
  553. if (IS_ERR(obj)) {
  554. DRM_ERROR("Failed to allocate scratch page\n");
  555. return PTR_ERR(obj);
  556. }
  557. vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
  558. if (IS_ERR(vma)) {
  559. ret = PTR_ERR(vma);
  560. goto err_unref;
  561. }
  562. ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
  563. if (ret)
  564. goto err_unref;
  565. engine->scratch = vma;
  566. DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08llx\n",
  567. engine->name, vma->node.start);
  568. return 0;
  569. err_unref:
  570. i915_gem_object_put(obj);
  571. return ret;
  572. }
  573. static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
  574. {
  575. struct intel_ring *ring = req->ring;
  576. struct i915_workarounds *w = &req->i915->workarounds;
  577. int ret, i;
  578. if (w->count == 0)
  579. return 0;
  580. ret = req->engine->emit_flush(req, EMIT_BARRIER);
  581. if (ret)
  582. return ret;
  583. ret = intel_ring_begin(req, (w->count * 2 + 2));
  584. if (ret)
  585. return ret;
  586. intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
  587. for (i = 0; i < w->count; i++) {
  588. intel_ring_emit_reg(ring, w->reg[i].addr);
  589. intel_ring_emit(ring, w->reg[i].value);
  590. }
  591. intel_ring_emit(ring, MI_NOOP);
  592. intel_ring_advance(ring);
  593. ret = req->engine->emit_flush(req, EMIT_BARRIER);
  594. if (ret)
  595. return ret;
  596. DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
  597. return 0;
  598. }
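/*
 * The workaround emission above uses a single MI_LOAD_REGISTER_IMM packet:
 * one header dword carrying the register count, then a (register, value)
 * pair per workaround, padded with a trailing MI_NOOP so the total dword
 * count stays even - hence the w->count * 2 + 2 reservation.
 */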
  599. static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
  600. {
  601. int ret;
  602. ret = intel_ring_workarounds_emit(req);
  603. if (ret != 0)
  604. return ret;
  605. ret = i915_gem_render_state_init(req);
  606. if (ret)
  607. return ret;
  608. return 0;
  609. }
  610. static int wa_add(struct drm_i915_private *dev_priv,
  611. i915_reg_t addr,
  612. const u32 mask, const u32 val)
  613. {
  614. const u32 idx = dev_priv->workarounds.count;
  615. if (WARN_ON(idx >= I915_MAX_WA_REGS))
  616. return -ENOSPC;
  617. dev_priv->workarounds.reg[idx].addr = addr;
  618. dev_priv->workarounds.reg[idx].value = val;
  619. dev_priv->workarounds.reg[idx].mask = mask;
  620. dev_priv->workarounds.count++;
  621. return 0;
  622. }
  623. #define WA_REG(addr, mask, val) do { \
  624. const int r = wa_add(dev_priv, (addr), (mask), (val)); \
  625. if (r) \
  626. return r; \
  627. } while (0)
  628. #define WA_SET_BIT_MASKED(addr, mask) \
  629. WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
  630. #define WA_CLR_BIT_MASKED(addr, mask) \
  631. WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
  632. #define WA_SET_FIELD_MASKED(addr, mask, value) \
  633. WA_REG(addr, mask, _MASKED_FIELD(mask, value))
  634. #define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
  635. #define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
  636. #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
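/*
 * The WA_* helpers above only record (register, mask, value) tuples in
 * dev_priv->workarounds; the actual writes are emitted later by
 * intel_ring_workarounds_emit(). The _MASKED variants use the hardware's
 * masked-write format (write-enable mask carried in the upper 16 bits of
 * the value), while WA_SET_BIT/WA_CLR_BIT read-modify-write the current
 * register contents and WA_WRITE stores a full 32-bit value.
 */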
  637. static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
  638. i915_reg_t reg)
  639. {
  640. struct drm_i915_private *dev_priv = engine->i915;
  641. struct i915_workarounds *wa = &dev_priv->workarounds;
  642. const uint32_t index = wa->hw_whitelist_count[engine->id];
  643. if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
  644. return -EINVAL;
  645. WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
  646. i915_mmio_reg_offset(reg));
  647. wa->hw_whitelist_count[engine->id]++;
  648. return 0;
  649. }
  650. static int gen8_init_workarounds(struct intel_engine_cs *engine)
  651. {
  652. struct drm_i915_private *dev_priv = engine->i915;
  653. WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
  654. /* WaDisableAsyncFlipPerfMode:bdw,chv */
  655. WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
  656. /* WaDisablePartialInstShootdown:bdw,chv */
  657. WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
  658. PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
  659. /* Use Force Non-Coherent whenever executing a 3D context. This is a
  660. * workaround for a possible hang in the unlikely event a TLB
  661. * invalidation occurs during a PSD flush.
  662. */
  663. /* WaForceEnableNonCoherent:bdw,chv */
  664. /* WaHdcDisableFetchWhenMasked:bdw,chv */
  665. WA_SET_BIT_MASKED(HDC_CHICKEN0,
  666. HDC_DONOT_FETCH_MEM_WHEN_MASKED |
  667. HDC_FORCE_NON_COHERENT);
  668. /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
  669. * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
  670. * polygons in the same 8x4 pixel/sample area to be processed without
  671. * stalling waiting for the earlier ones to write to Hierarchical Z
  672. * buffer."
  673. *
  674. * This optimization is off by default for BDW and CHV; turn it on.
  675. */
  676. WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
  677. /* Wa4x4STCOptimizationDisable:bdw,chv */
  678. WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
  679. /*
  680. * BSpec recommends 8x4 when MSAA is used,
  681. * however in practice 16x4 seems fastest.
  682. *
  683. * Note that PS/WM thread counts depend on the WIZ hashing
  684. * disable bit, which we don't touch here, but it's good
  685. * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
  686. */
  687. WA_SET_FIELD_MASKED(GEN7_GT_MODE,
  688. GEN6_WIZ_HASHING_MASK,
  689. GEN6_WIZ_HASHING_16x4);
  690. return 0;
  691. }
  692. static int bdw_init_workarounds(struct intel_engine_cs *engine)
  693. {
  694. struct drm_i915_private *dev_priv = engine->i915;
  695. int ret;
  696. ret = gen8_init_workarounds(engine);
  697. if (ret)
  698. return ret;
  699. /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
  700. WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
  701. /* WaDisableDopClockGating:bdw */
  702. WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
  703. DOP_CLOCK_GATING_DISABLE);
  704. WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
  705. GEN8_SAMPLER_POWER_BYPASS_DIS);
  706. WA_SET_BIT_MASKED(HDC_CHICKEN0,
  707. /* WaForceContextSaveRestoreNonCoherent:bdw */
  708. HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
  709. /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
  710. (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
  711. return 0;
  712. }
  713. static int chv_init_workarounds(struct intel_engine_cs *engine)
  714. {
  715. struct drm_i915_private *dev_priv = engine->i915;
  716. int ret;
  717. ret = gen8_init_workarounds(engine);
  718. if (ret)
  719. return ret;
  720. /* WaDisableThreadStallDopClockGating:chv */
  721. WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
  722. /* Improve HiZ throughput on CHV. */
  723. WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
  724. return 0;
  725. }
  726. static int gen9_init_workarounds(struct intel_engine_cs *engine)
  727. {
  728. struct drm_i915_private *dev_priv = engine->i915;
  729. int ret;
  730. /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
  731. I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
  732. /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
  733. I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
  734. GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
  735. /* WaDisableKillLogic:bxt,skl,kbl */
  736. I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
  737. ECOCHK_DIS_TLB);
  738. /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
  739. /* WaDisablePartialInstShootdown:skl,bxt,kbl */
  740. WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
  741. FLOW_CONTROL_ENABLE |
  742. PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
  743. /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
  744. WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
  745. GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
  746. /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
  747. if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
  748. IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
  749. WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
  750. GEN9_DG_MIRROR_FIX_ENABLE);
  751. /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
  752. if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
  753. IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
  754. WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
  755. GEN9_RHWO_OPTIMIZATION_DISABLE);
  756. /*
  757. * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
  758. * but we do that in per ctx batchbuffer as there is an issue
  759. * with this register not getting restored on ctx restore
  760. */
  761. }
  762. /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
  763. /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
  764. WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
  765. GEN9_ENABLE_YV12_BUGFIX |
  766. GEN9_ENABLE_GPGPU_PREEMPTION);
  767. /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
  768. /* WaDisablePartialResolveInVc:skl,bxt,kbl */
  769. WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
  770. GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
  771. /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
  772. WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
  773. GEN9_CCS_TLB_PREFETCH_ENABLE);
  774. /* WaDisableMaskBasedCammingInRCC:skl,bxt */
  775. if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
  776. IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
  777. WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
  778. PIXEL_MASK_CAMMING_DISABLE);
  779. /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
  780. WA_SET_BIT_MASKED(HDC_CHICKEN0,
  781. HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
  782. HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
  783. /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
  784. * both tied to WaForceContextSaveRestoreNonCoherent
  785. * in some hsds for skl. We keep the tie for all gen9. The
  786. * documentation is a bit hazy and so we want to get common behaviour,
  787. * even though there is no clear evidence we would need both on kbl/bxt.
  788. * This area has been source of system hangs so we play it safe
  789. * and mimic the skl regardless of what bspec says.
  790. *
  791. * Use Force Non-Coherent whenever executing a 3D context. This
  792. * is a workaround for a possible hang in the unlikely event
  793. * a TLB invalidation occurs during a PSD flush.
  794. */
  795. /* WaForceEnableNonCoherent:skl,bxt,kbl */
  796. WA_SET_BIT_MASKED(HDC_CHICKEN0,
  797. HDC_FORCE_NON_COHERENT);
  798. /* WaDisableHDCInvalidation:skl,bxt,kbl */
  799. I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
  800. BDW_DISABLE_HDC_INVALIDATION);
  801. /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
  802. if (IS_SKYLAKE(dev_priv) ||
  803. IS_KABYLAKE(dev_priv) ||
  804. IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
  805. WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
  806. GEN8_SAMPLER_POWER_BYPASS_DIS);
  807. /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
  808. WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
  809. /* WaOCLCoherentLineFlush:skl,bxt,kbl */
  810. I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
  811. GEN8_LQSC_FLUSH_COHERENT_LINES));
  812. /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
  813. ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
  814. if (ret)
  815. return ret;
  816. /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
  817. ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
  818. if (ret)
  819. return ret;
  820. /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
  821. ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
  822. if (ret)
  823. return ret;
  824. return 0;
  825. }
  826. static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
  827. {
  828. struct drm_i915_private *dev_priv = engine->i915;
  829. u8 vals[3] = { 0, 0, 0 };
  830. unsigned int i;
  831. for (i = 0; i < 3; i++) {
  832. u8 ss;
  833. /*
  834. * Only consider slices where one, and only one, subslice has 7
  835. * EUs
  836. */
  837. if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
  838. continue;
  839. /*
  840. * subslice_7eu[i] != 0 (because of the check above) and
  841. * ss_max == 4 (maximum number of subslices possible per slice)
  842. *
  843. * -> 0 <= ss <= 3;
  844. */
  845. ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
  846. vals[i] = 3 - ss;
  847. }
  848. if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
  849. return 0;
  850. /* Tune IZ hashing. See intel_device_info_runtime_init() */
  851. WA_SET_FIELD_MASKED(GEN7_GT_MODE,
  852. GEN9_IZ_HASHING_MASK(2) |
  853. GEN9_IZ_HASHING_MASK(1) |
  854. GEN9_IZ_HASHING_MASK(0),
  855. GEN9_IZ_HASHING(2, vals[2]) |
  856. GEN9_IZ_HASHING(1, vals[1]) |
  857. GEN9_IZ_HASHING(0, vals[0]));
  858. return 0;
  859. }
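/*
 * Worked example for skl_tune_iz_hashing(): if slice i has exactly one
 * subslice with 7 EUs, subslice_7eu[i] is a power of two, e.g. 0x4.
 * ffs(0x4) - 1 = 2, so vals[i] = 3 - 2 = 1, which is then programmed into
 * the GEN9_IZ_HASHING field for slice i. Slices with zero, or more than
 * one, such subslice are left at the default value of 0.
 */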
  860. static int skl_init_workarounds(struct intel_engine_cs *engine)
  861. {
  862. struct drm_i915_private *dev_priv = engine->i915;
  863. int ret;
  864. ret = gen9_init_workarounds(engine);
  865. if (ret)
  866. return ret;
  867. /*
  868. * Actual WA is to disable percontext preemption granularity control
  869. * until D0 which is the default case so this is equivalent to
  870. * !WaDisablePerCtxtPreemptionGranularityControl:skl
  871. */
  872. if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
  873. I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
  874. _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
  875. }
  876. if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
  877. /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
  878. I915_WRITE(FF_SLICE_CS_CHICKEN2,
  879. _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
  880. }
  881. /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
  882. * involving this register should also be added to WA batch as required.
  883. */
  884. if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
  885. /* WaDisableLSQCROPERFforOCL:skl */
  886. I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
  887. GEN8_LQSC_RO_PERF_DIS);
  888. /* WaEnableGapsTsvCreditFix:skl */
  889. if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
  890. I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
  891. GEN9_GAPS_TSV_CREDIT_DISABLE));
  892. }
  893. /* WaDisablePowerCompilerClockGating:skl */
  894. if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
  895. WA_SET_BIT_MASKED(HIZ_CHICKEN,
  896. BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
  897. /* WaBarrierPerformanceFixDisable:skl */
  898. if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
  899. WA_SET_BIT_MASKED(HDC_CHICKEN0,
  900. HDC_FENCE_DEST_SLM_DISABLE |
  901. HDC_BARRIER_PERFORMANCE_DISABLE);
  902. /* WaDisableSbeCacheDispatchPortSharing:skl */
  903. if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
  904. WA_SET_BIT_MASKED(
  905. GEN7_HALF_SLICE_CHICKEN1,
  906. GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
  907. /* WaDisableGafsUnitClkGating:skl */
  908. WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
  909. /* WaInPlaceDecompressionHang:skl */
  910. if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
  911. WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
  912. GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
  913. /* WaDisableLSQCROPERFforOCL:skl */
  914. ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
  915. if (ret)
  916. return ret;
  917. return skl_tune_iz_hashing(engine);
  918. }
  919. static int bxt_init_workarounds(struct intel_engine_cs *engine)
  920. {
  921. struct drm_i915_private *dev_priv = engine->i915;
  922. int ret;
  923. ret = gen9_init_workarounds(engine);
  924. if (ret)
  925. return ret;
  926. /* WaStoreMultiplePTEenable:bxt */
  927. /* This is a requirement according to Hardware specification */
  928. if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
  929. I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
  930. /* WaSetClckGatingDisableMedia:bxt */
  931. if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
  932. I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
  933. ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
  934. }
  935. /* WaDisableThreadStallDopClockGating:bxt */
  936. WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
  937. STALL_DOP_GATING_DISABLE);
  938. /* WaDisablePooledEuLoadBalancingFix:bxt */
  939. if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
  940. WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
  941. GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
  942. }
  943. /* WaDisableSbeCacheDispatchPortSharing:bxt */
  944. if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
  945. WA_SET_BIT_MASKED(
  946. GEN7_HALF_SLICE_CHICKEN1,
  947. GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
  948. }
  949. /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
  950. /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
  951. /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
  952. /* WaDisableLSQCROPERFforOCL:bxt */
  953. if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
  954. ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
  955. if (ret)
  956. return ret;
  957. ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
  958. if (ret)
  959. return ret;
  960. }
  961. /* WaProgramL3SqcReg1DefaultForPerf:bxt */
  962. if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
  963. I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
  964. L3_HIGH_PRIO_CREDITS(2));
  965. /* WaToEnableHwFixForPushConstHWBug:bxt */
  966. if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
  967. WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
  968. GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
  969. /* WaInPlaceDecompressionHang:bxt */
  970. if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
  971. WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
  972. GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
  973. return 0;
  974. }
  975. static int kbl_init_workarounds(struct intel_engine_cs *engine)
  976. {
  977. struct drm_i915_private *dev_priv = engine->i915;
  978. int ret;
  979. ret = gen9_init_workarounds(engine);
  980. if (ret)
  981. return ret;
  982. /* WaEnableGapsTsvCreditFix:kbl */
  983. I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
  984. GEN9_GAPS_TSV_CREDIT_DISABLE));
  985. /* WaDisableDynamicCreditSharing:kbl */
  986. if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
  987. WA_SET_BIT(GAMT_CHKN_BIT_REG,
  988. GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
  989. /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
  990. if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
  991. WA_SET_BIT_MASKED(HDC_CHICKEN0,
  992. HDC_FENCE_DEST_SLM_DISABLE);
  993. /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
  994. * involving this register should also be added to WA batch as required.
  995. */
  996. if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
  997. /* WaDisableLSQCROPERFforOCL:kbl */
  998. I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
  999. GEN8_LQSC_RO_PERF_DIS);
  1000. /* WaToEnableHwFixForPushConstHWBug:kbl */
  1001. if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
  1002. WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
  1003. GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
  1004. /* WaDisableGafsUnitClkGating:kbl */
  1005. WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
  1006. /* WaDisableSbeCacheDispatchPortSharing:kbl */
  1007. WA_SET_BIT_MASKED(
  1008. GEN7_HALF_SLICE_CHICKEN1,
  1009. GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
  1010. /* WaInPlaceDecompressionHang:kbl */
  1011. WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
  1012. GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
  1013. /* WaDisableLSQCROPERFforOCL:kbl */
  1014. ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
  1015. if (ret)
  1016. return ret;
  1017. return 0;
  1018. }
  1019. int init_workarounds_ring(struct intel_engine_cs *engine)
  1020. {
  1021. struct drm_i915_private *dev_priv = engine->i915;
  1022. WARN_ON(engine->id != RCS);
  1023. dev_priv->workarounds.count = 0;
  1024. dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
  1025. if (IS_BROADWELL(dev_priv))
  1026. return bdw_init_workarounds(engine);
  1027. if (IS_CHERRYVIEW(dev_priv))
  1028. return chv_init_workarounds(engine);
  1029. if (IS_SKYLAKE(dev_priv))
  1030. return skl_init_workarounds(engine);
  1031. if (IS_BROXTON(dev_priv))
  1032. return bxt_init_workarounds(engine);
  1033. if (IS_KABYLAKE(dev_priv))
  1034. return kbl_init_workarounds(engine);
  1035. return 0;
  1036. }
  1037. static int init_render_ring(struct intel_engine_cs *engine)
  1038. {
  1039. struct drm_i915_private *dev_priv = engine->i915;
  1040. int ret = init_ring_common(engine);
  1041. if (ret)
  1042. return ret;
  1043. /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
  1044. if (IS_GEN(dev_priv, 4, 6))
  1045. I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
  1046. /* We need to disable the AsyncFlip performance optimisations in order
  1047. * to use MI_WAIT_FOR_EVENT within the CS. It should already be
  1048. * programmed to '1' on all products.
  1049. *
  1050. * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
  1051. */
  1052. if (IS_GEN(dev_priv, 6, 7))
  1053. I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
  1054. /* Required for the hardware to program scanline values for waiting */
  1055. /* WaEnableFlushTlbInvalidationMode:snb */
  1056. if (IS_GEN6(dev_priv))
  1057. I915_WRITE(GFX_MODE,
  1058. _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
  1059. /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
  1060. if (IS_GEN7(dev_priv))
  1061. I915_WRITE(GFX_MODE_GEN7,
  1062. _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
  1063. _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
  1064. if (IS_GEN6(dev_priv)) {
  1065. /* From the Sandybridge PRM, volume 1 part 3, page 24:
  1066. * "If this bit is set, STCunit will have LRA as replacement
  1067. * policy. [...] This bit must be reset. LRA replacement
  1068. * policy is not supported."
  1069. */
  1070. I915_WRITE(CACHE_MODE_0,
  1071. _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
  1072. }
  1073. if (IS_GEN(dev_priv, 6, 7))
  1074. I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
  1075. if (INTEL_INFO(dev_priv)->gen >= 6)
  1076. I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
  1077. return init_workarounds_ring(engine);
  1078. }
  1079. static void render_ring_cleanup(struct intel_engine_cs *engine)
  1080. {
  1081. struct drm_i915_private *dev_priv = engine->i915;
  1082. if (dev_priv->semaphore_obj) {
  1083. i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
  1084. i915_gem_object_put(dev_priv->semaphore_obj);
  1085. dev_priv->semaphore_obj = NULL;
  1086. }
  1087. intel_engine_cleanup_scratch(engine);
  1088. }
  1089. static int gen8_rcs_signal(struct drm_i915_gem_request *req)
  1090. {
  1091. struct intel_ring *ring = req->ring;
  1092. struct drm_i915_private *dev_priv = req->i915;
  1093. struct intel_engine_cs *waiter;
  1094. enum intel_engine_id id;
  1095. int ret, num_rings;
  1096. num_rings = INTEL_INFO(dev_priv)->num_rings;
  1097. ret = intel_ring_begin(req, (num_rings-1) * 8);
  1098. if (ret)
  1099. return ret;
  1100. for_each_engine_id(waiter, dev_priv, id) {
  1101. u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
  1102. if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
  1103. continue;
  1104. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
  1105. intel_ring_emit(ring,
  1106. PIPE_CONTROL_GLOBAL_GTT_IVB |
  1107. PIPE_CONTROL_QW_WRITE |
  1108. PIPE_CONTROL_CS_STALL);
  1109. intel_ring_emit(ring, lower_32_bits(gtt_offset));
  1110. intel_ring_emit(ring, upper_32_bits(gtt_offset));
  1111. intel_ring_emit(ring, req->fence.seqno);
  1112. intel_ring_emit(ring, 0);
  1113. intel_ring_emit(ring,
  1114. MI_SEMAPHORE_SIGNAL |
  1115. MI_SEMAPHORE_TARGET(waiter->hw_id));
  1116. intel_ring_emit(ring, 0);
  1117. }
  1118. intel_ring_advance(ring);
  1119. return 0;
  1120. }
  1121. static int gen8_xcs_signal(struct drm_i915_gem_request *req)
  1122. {
  1123. struct intel_ring *ring = req->ring;
  1124. struct drm_i915_private *dev_priv = req->i915;
  1125. struct intel_engine_cs *waiter;
  1126. enum intel_engine_id id;
  1127. int ret, num_rings;
  1128. num_rings = INTEL_INFO(dev_priv)->num_rings;
  1129. ret = intel_ring_begin(req, (num_rings-1) * 6);
  1130. if (ret)
  1131. return ret;
  1132. for_each_engine_id(waiter, dev_priv, id) {
  1133. u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
  1134. if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
  1135. continue;
  1136. intel_ring_emit(ring,
  1137. (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
  1138. intel_ring_emit(ring,
  1139. lower_32_bits(gtt_offset) |
  1140. MI_FLUSH_DW_USE_GTT);
  1141. intel_ring_emit(ring, upper_32_bits(gtt_offset));
  1142. intel_ring_emit(ring, req->fence.seqno);
  1143. intel_ring_emit(ring,
  1144. MI_SEMAPHORE_SIGNAL |
  1145. MI_SEMAPHORE_TARGET(waiter->hw_id));
  1146. intel_ring_emit(ring, 0);
  1147. }
  1148. intel_ring_advance(ring);
  1149. return 0;
  1150. }
  1151. static int gen6_signal(struct drm_i915_gem_request *req)
  1152. {
  1153. struct intel_ring *ring = req->ring;
  1154. struct drm_i915_private *dev_priv = req->i915;
  1155. struct intel_engine_cs *useless;
  1156. enum intel_engine_id id;
  1157. int ret, num_rings;
  1158. num_rings = INTEL_INFO(dev_priv)->num_rings;
  1159. ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
  1160. if (ret)
  1161. return ret;
  1162. for_each_engine_id(useless, dev_priv, id) {
  1163. i915_reg_t mbox_reg = req->engine->semaphore.mbox.signal[id];
  1164. if (i915_mmio_reg_valid(mbox_reg)) {
  1165. intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
  1166. intel_ring_emit_reg(ring, mbox_reg);
  1167. intel_ring_emit(ring, req->fence.seqno);
  1168. }
  1169. }
  1170. /* If num_dwords was rounded, make sure the tail pointer is correct */
  1171. if (num_rings % 2 == 0)
  1172. intel_ring_emit(ring, MI_NOOP);
  1173. intel_ring_advance(ring);
  1174. return 0;
  1175. }
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	I915_WRITE_TAIL(request->engine,
			intel_ring_offset(request->ring, request->tail));
}

static int i9xx_emit_request(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, req->fence.seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	req->tail = ring->tail;

	return 0;
}

/**
 * gen6_sema_emit_request - Update the semaphore mailbox registers
 *
 * @req - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
{
	int ret;

	ret = req->engine->semaphore.signal(req);
	if (ret)
		return ret;

	return i9xx_emit_request(req);
}
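/*
 * gen8 render breadcrumb: signal the other engines first, then emit a
 * PIPE_CONTROL that writes the seqno into the hardware status page and
 * finish with MI_USER_INTERRUPT, mirroring i9xx_emit_request() for the
 * execlists-less gen8 render ring.
 */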
static int gen8_render_emit_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_ring *ring = req->ring;
	int ret;

	if (engine->semaphore.signal) {
		ret = engine->semaphore.signal(req);
		if (ret)
			return ret;
	}

	ret = intel_ring_begin(req, 8);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
			       PIPE_CONTROL_CS_STALL |
			       PIPE_CONTROL_QW_WRITE));
	intel_ring_emit(ring, intel_hws_seqno_address(engine));
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
	/* We're thrashing one dword of HWS. */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	req->tail = ring->tail;

	return 0;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen8_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_private *dev_priv = req->i915;
	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_SEMAPHORE_WAIT |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(ring, signal->fence.seqno);
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_advance(ring);

	/* When the !RCS engines idle waiting upon a semaphore, they lose their
	 * pagetables and we must reload them before executing the batch.
	 * We do this on the i915_switch_context() following the wait and
	 * before the dispatch.
	 */
	ppgtt = req->ctx->ppgtt;
	if (ppgtt && req->engine->id != RCS)
		ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
	return 0;
}

static int
gen6_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
{
	struct intel_ring *ring = req->ring;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->id];
	int ret;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, dw1 | wait_mbox);
	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	intel_ring_emit(ring, signal->fence.seqno - 1);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void
gen5_seqno_barrier(struct intel_engine_cs *engine)
{
	/* MI_STORE are internally buffered by the GPU and not flushed
	 * either by MI_FLUSH or SyncFlush or any other combination of
	 * MI commands.
	 *
	 * "Only the submission of the store operation is guaranteed.
	 * The write result will be complete (coherent) some time later
	 * (this is practically a finite period but there is no guaranteed
	 * latency)."
	 *
	 * Empirically, we observe that we need a delay of at least 75us to
	 * be sure that the seqno write is visible by the CPU.
	 */
	usleep_range(125, 250);
}

static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
	spin_unlock_irq(&dev_priv->uncore.lock);
}
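/*
 * Per-generation user-interrupt enable/disable hooks. Each pair unmasks or
 * masks the engine's bit in the relevant interrupt mask register (the GT
 * interrupt mask on gen5+, the legacy IMR on gen2-4, PM interrupts for the
 * Haswell VEBOX), matching how engine->irq_enable/irq_disable are wired up
 * in intel_ring_init_irq() below.
 */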
static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}

static int
bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
	gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~0);
	gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen8_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
gen8_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
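/*
 * MI_BATCH_BUFFER_START emitters, one per hardware generation. The i830
 * variant further down additionally works around a TLB-invalidation erratum
 * by first blitting the batch into a scratch area and executing it from
 * there.
 */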
static int
i965_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	u32 cs_offset = req->engine->scratch->node.start;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Evict the invalid PTE TLBs */
	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
	intel_ring_emit(ring, cs_offset);
	intel_ring_emit(ring, 0xdeadbeef);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(req, 6 + 2);
		if (ret)
			return ret;

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(ring,
				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);

		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static int
i915_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}
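/*
 * Hardware status page (HWS) management: platforms with a GTT-backed status
 * page get a GEM object pinned into the GGTT (init_status_page), while older
 * parts that need a physical status page use a DMA-coherent allocation
 * instead (init_phys_status_page), with matching cleanup helpers.
 */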
static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	i915_vma_unpin(vma);
	i915_gem_object_unpin_map(vma->obj);
	i915_vma_put(vma);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	int ret;

	obj = i915_gem_object_create(&engine->i915->drm, 4096);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = vma->node.start;
	engine->status_page.page_addr =
		i915_gem_object_pin_map(obj, I915_MAP_WB);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08llx\n",
			 engine->name, vma->node.start);
	return 0;

err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

int intel_ring_pin(struct intel_ring *ring)
{
	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
	struct i915_vma *vma = ring->vma;
	void *addr;
	int ret;

	GEM_BUG_ON(ring->vaddr);

	if (ring->needs_iomap)
		flags |= PIN_MAPPABLE;

	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		if (flags & PIN_MAPPABLE)
			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		else
			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
		if (unlikely(ret))
			return ret;
	}

	ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
	if (unlikely(ret))
		return ret;

	if (flags & PIN_MAPPABLE)
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(addr))
		goto err;

	ring->vaddr = addr;
	return 0;

err:
	i915_vma_unpin(vma);
	return PTR_ERR(addr);
}

void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

	if (ring->needs_iomap)
		i915_vma_unpin_iomap(ring->vma);
	else
		i915_gem_object_unpin_map(ring->vma->obj);
	ring->vaddr = NULL;

	i915_vma_unpin(ring->vma);
}
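/*
 * Ring buffer backing storage: prefer stolen memory on !LLC platforms
 * (where the ring is accessed through the GTT anyway) and fall back to a
 * regular GEM object. The resulting VMA is mapped into the GGTT by
 * intel_ring_pin() above.
 */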
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = ERR_PTR(-ENODEV);
	if (!HAS_LLC(dev_priv))
		obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create(&dev_priv->drm, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

	vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	ring->engine = engine;

	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->i915) || IS_845G(engine->i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	ring->last_retired_head = -1;
	intel_ring_update_space(ring);

	vma = intel_ring_create_vma(engine->i915, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;
	if (!HAS_LLC(engine->i915) || vma->obj->stolen)
		ring->needs_iomap = true;

	list_add(&ring->link, &engine->buffers);
	return ring;
}

void
intel_ring_free(struct intel_ring *ring)
{
	i915_vma_put(ring->vma);
	list_del(&ring->link);
	kfree(ring);
}
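/*
 * Legacy (non-execlists) context pinning: take a reference on the context
 * and, if it carries hardware state, pin that state into the GGTT for as
 * long as requests on this engine may use it. The pin count makes the
 * pin/unpin pair idempotent per (context, engine).
 */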
static int intel_ring_context_pin(struct i915_gem_context *ctx,
				  struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	int ret;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (ce->pin_count++)
		return 0;

	if (ce->state) {
		ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
		if (ret)
			goto error;

		ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
				   PIN_GLOBAL | PIN_HIGH);
		if (ret)
			goto error;
	}

	/* The kernel context is only used as a placeholder for flushing the
	 * active context. It is never used for submitting user rendering and
	 * as such never requires the golden render context, and so we can skip
	 * emitting it when we switch to the kernel context. This is required
	 * as during eviction we cannot allocate and pin the renderstate in
	 * order to initialise the context.
	 */
	if (ctx == ctx->i915->kernel_context)
		ce->initialised = true;

	i915_gem_context_get(ctx);
	return 0;

error:
	ce->pin_count = 0;
	return ret;
}

static void intel_ring_context_unpin(struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (--ce->pin_count)
		return;

	if (ce->state)
		i915_vma_unpin(ce->state);

	i915_gem_context_put(ctx);
}
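/*
 * Common legacy ringbuffer bring-up: set up the engine, pin the kernel
 * context, allocate a 32-page ring, install either a GTT-backed or a
 * physical status page, and pin the ring into the GGTT. Everything is
 * unwound through intel_engine_cleanup() on failure.
 */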
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring;
	int ret;

	WARN_ON(engine->buffer);

	intel_engine_setup_common(engine);

	memset(engine->semaphore.sync_seqno, 0,
	       sizeof(engine->semaphore.sync_seqno));

	ret = intel_engine_init_common(engine);
	if (ret)
		goto error;

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
	if (ret)
		goto error;

	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		goto error;
	}

	if (I915_NEED_GFX_HWS(dev_priv)) {
		ret = init_status_page(engine);
		if (ret)
			goto error;
	} else {
		WARN_ON(engine->id != RCS);
		ret = init_phys_status_page(engine);
		if (ret)
			goto error;
	}

	ret = intel_ring_pin(ring);
	if (ret) {
		intel_ring_free(ring);
		goto error;
	}
	engine->buffer = ring;

	return 0;

error:
	intel_engine_cleanup(engine);
	return ret;
}

void intel_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv;

	if (!intel_engine_initialized(engine))
		return;

	dev_priv = engine->i915;

	if (engine->buffer) {
		WARN_ON(!IS_GEN2(dev_priv) &&
			(I915_READ_MODE(engine) & MODE_IDLE) == 0);

		intel_ring_unpin(engine->buffer);
		intel_ring_free(engine->buffer);
		engine->buffer = NULL;
	}

	if (engine->cleanup)
		engine->cleanup(engine);

	if (I915_NEED_GFX_HWS(dev_priv)) {
		cleanup_status_page(engine);
	} else {
		WARN_ON(engine->id != RCS);
		cleanup_phys_status_page(engine);
	}

	intel_engine_cleanup_common(engine);

	intel_ring_context_unpin(dev_priv->kernel_context, engine);

	engine->i915 = NULL;
}

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	int ret;

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	request->ring = request->engine->buffer;

	ret = intel_ring_begin(request, 0);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}
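/*
 * Ring space management: if the ring is too full for the next packet, wait
 * for the oldest request whose retirement frees enough space. The
 * reserved_space carved out at request-allocation time guarantees that
 * emitting the final breadcrumb never has to wait here.
 */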
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_gem_request *target;
	int ret;

	intel_ring_update_space(ring);
	if (ring->space >= bytes)
		return 0;

	/*
	 * Space is reserved in the ringbuffer for finalising the request,
	 * as that cannot be allowed to fail. During request finalisation,
	 * reserved_space is set to 0 to stop the overallocation and the
	 * assumption is that then we never need to wait (which has the
	 * risk of failing with EINTR).
	 *
	 * See also i915_gem_request_alloc() and i915_add_request().
	 */
	GEM_BUG_ON(!req->reserved_space);

	list_for_each_entry(target, &ring->request_list, ring_link) {
		unsigned space;

		/* Would completion of this request free enough space? */
		space = __intel_ring_space(target->postfix, ring->tail,
					   ring->size);
		if (space >= bytes)
			break;
	}

	if (WARN_ON(&target->ring_link == &ring->request_list))
		return -ENOSPC;

	ret = i915_wait_request(target, true, NULL, NO_WAITBOOST);
	if (ret)
		return ret;

	if (i915_reset_in_progress(&target->i915->gpu_error))
		return -EAGAIN;

	i915_gem_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
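/*
 * intel_ring_begin() reserves num_dwords of ring space for the caller,
 * wrapping back to the start of the buffer (padding the tail with MI_NOOPs)
 * whenever the packet would not fit contiguously before the end of the
 * effective ring size.
 */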
int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
	struct intel_ring *ring = req->ring;
	int remain_actual = ring->size - ring->tail;
	int remain_usable = ring->effective_size - ring->tail;
	int bytes = num_dwords * sizeof(u32);
	int total_bytes, wait_bytes;
	bool need_wrap = false;

	total_bytes = bytes + req->reserved_space;

	if (unlikely(bytes > remain_usable)) {
		/*
		 * Not enough space for the basic request. So need to flush
		 * out the remainder and then wait for base + reserved.
		 */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else if (unlikely(total_bytes > remain_usable)) {
		/*
		 * The base request will fit but the reserved space
		 * falls off the end. So we don't need an immediate wrap
		 * and only need to effectively wait for the reserved
		 * size space from the start of ringbuffer.
		 */
		wait_bytes = remain_actual + req->reserved_space;
	} else {
		/* No wrapping required, just waiting. */
		wait_bytes = total_bytes;
	}

	if (wait_bytes > ring->space) {
		int ret = wait_for_space(req, wait_bytes);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(need_wrap)) {
		GEM_BUG_ON(remain_actual > ring->space);
		GEM_BUG_ON(ring->tail + remain_actual > ring->size);

		/* Fill the tail with MI_NOOP */
		memset(ring->vaddr + ring->tail, 0, remain_actual);
		ring->tail = 0;
		ring->space -= remain_actual;
	}

	ring->space -= bytes;
	GEM_BUG_ON(ring->space < 0);
	return 0;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int num_dwords =
		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	ret = intel_ring_begin(req, num_dwords);
	if (ret)
		return ret;

	while (num_dwords--)
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return 0;
}

void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore_obj) {
		struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
		struct page *page = i915_gem_object_get_dirty_page(obj, 0);
		void *semaphores = kmap(page);

		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap(page);
	}
	memset(engine->semaphore.sync_seqno, 0,
	       sizeof(engine->semaphore.sync_seqno));

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);
	engine->last_submitted_seqno = seqno;

	engine->hangcheck.seqno = seqno;

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);
}

static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_BSD_SLEEP_PSMI_CONTROL,
				       GEN6_BSD_SLEEP_INDICATOR,
				       0,
				       50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_GEN(req->i915) >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}

static int
gen8_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	bool ppgtt = USES_PPGTT(req->i915) &&
			!(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
hsw_emit_bb_start(struct drm_i915_gem_request *req,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring,
			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_GEN(req->i915) >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	return 0;
}
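/*
 * Set up the inter-engine semaphore hooks: on gen8+ a shared GGTT-pinned
 * semaphore page addressed per (signaller, waiter) pair, on gen6/7 the
 * per-engine MI_SEMAPHORE mailbox registers. If the gen8 semaphore object
 * cannot be allocated or pinned, semaphores are disabled altogether.
 */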
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	int ret, i;

	if (!i915.semaphores)
		return;

	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
		obj = i915_gem_object_create(&dev_priv->drm, 4096);
		if (IS_ERR(obj)) {
			DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
			i915.semaphores = 0;
		} else {
			i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
			ret = i915_gem_object_ggtt_pin(obj, NULL,
						       0, 0, PIN_HIGH);
			if (ret != 0) {
				i915_gem_object_put(obj);
				DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
				i915.semaphores = 0;
			} else {
				dev_priv->semaphore_obj = obj;
			}
		}
	}

	if (!i915.semaphores)
		return;

	if (INTEL_GEN(dev_priv) >= 8) {
		u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);

		engine->semaphore.sync_to = gen8_ring_sync_to;
		engine->semaphore.signal = gen8_xcs_signal;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			u64 ring_offset;

			if (i != engine->id)
				ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
			else
				ring_offset = MI_SEMAPHORE_SYNC_INVALID;

			engine->semaphore.signal_ggtt[i] = ring_offset;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->semaphore.sync_to = gen6_ring_sync_to;
		engine->semaphore.signal = gen6_signal;

		/*
		 * The current semaphore is only applied on pre-gen8
		 * platform. And there is no VCS2 ring on the pre-gen8
		 * platform. So the semaphore between RCS and VCS2 is
		 * initialized as INVALID. Gen8 will initialize the
		 * sema between VCS2 and RCS later.
		 */
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			static const struct {
				u32 wait_mbox;
				i915_reg_t mbox_reg;
			} sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
				[RCS] = {
					[VCS]  = { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
					[BCS]  = { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
					[VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
				},
				[VCS] = {
					[RCS]  = { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
					[BCS]  = { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
					[VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
				},
				[BCS] = {
					[RCS]  = { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
					[VCS]  = { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
					[VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
				},
				[VECS] = {
					[RCS]  = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
					[VCS]  = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
					[BCS]  = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
				},
			};
			u32 wait_mbox;
			i915_reg_t mbox_reg;

			if (i == engine->id || i == VCS2) {
				wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
				mbox_reg = GEN6_NOSYNC;
			} else {
				wait_mbox = sem_data[engine->id][i].wait_mbox;
				mbox_reg = sem_data[engine->id][i].mbox_reg;
			}

			engine->semaphore.mbox.wait[i] = wait_mbox;
			engine->semaphore.mbox.signal[i] = mbox_reg;
		}
	}
}

static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->irq_enable = gen8_irq_enable;
		engine->irq_disable = gen8_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}
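/*
 * Fill in the engine vfuncs common to all legacy rings, then let the
 * per-engine init functions below (render/bsd/blt/vebox) override the
 * generation- and engine-specific hooks.
 */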
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);

	engine->init_hw = init_ring_common;

	engine->emit_request = i9xx_emit_request;
	if (i915.semaphores)
		engine->emit_request = gen6_sema_emit_request;
	engine->submit_request = i9xx_submit_request;

	if (INTEL_GEN(dev_priv) >= 8)
		engine->emit_bb_start = gen8_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_request = gen8_render_emit_request;
		engine->emit_flush = gen8_render_ring_flush;
		if (i915.semaphores)
			engine->semaphore.signal = gen8_rcs_signal;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;
	engine->cleanup = render_ring_cleanup;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	if (INTEL_GEN(dev_priv) >= 6) {
		ret = intel_engine_create_scratch(engine, 4096);
		if (ret)
			return ret;
	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->submit_request = gen6_bsd_submit_request;
		engine->emit_flush = gen6_bsd_ring_flush;
		if (INTEL_GEN(dev_priv) < 8)
			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->mmio_base = BSD_RING_BASE;
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}

/**
 * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_bsd_ring_flush;

	return intel_init_ring_buffer(engine);
}

int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	if (INTEL_GEN(dev_priv) < 8)
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

	if (INTEL_GEN(dev_priv) < 8) {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_enable = hsw_vebox_irq_enable;
		engine->irq_disable = hsw_vebox_irq_disable;
	}

	return intel_init_ring_buffer(engine);
}