/* drivers/gpu/drm/i915/intel_uncore.c — GT "uncore" register access and forcewake handling. */
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
  23. #include "i915_drv.h"
  24. #include "intel_drv.h"
/* Timeout, in milliseconds, to wait for a forcewake handshake ack. */
#define FORCEWAKE_ACK_TIMEOUT_MS 2

/*
 * Raw MMIO accessors: touch the register BAR directly with no forcewake,
 * locking, tracing or suspend assertions.  Callers must provide whichever
 * of those guarantees they need themselves.
 */
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

/* Read a register solely to flush preceding posted MMIO writes. */
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
/*
 * Warn (once) if register access is attempted while the device is runtime
 * suspended — such an access cannot reach the hardware.
 */
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}
/*
 * Spin (up to 500us) until the GT thread status register reports the GT
 * threads awake (core mask bits clear).
 */
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}
/* Drop any outstanding forcewake request by clearing the gen6 FORCEWAKE reg. */
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}
/*
 * Gen6 (SNB) forcewake get: wait for any stale ack to clear, request the
 * wake, wait for the hardware to ack it, then wait for the GT thread to
 * come out of RC6.  fw_engine is unused — gen6 has a single fw domain.
 */
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
/* Clear every bit of the gen7 multithreaded forcewake request register. */
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}
/*
 * Gen7 (IVB/HSW/BDW) multithreaded forcewake get: same handshake as gen6
 * but via FORCEWAKE_MT with masked-bit writes; the ack register differs
 * between HSW/BDW and the other gen7 parts.
 */
static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	u32 forcewake_ack;

	/* HSW/BDW use a dedicated ack register. */
	if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
  98. static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
  99. {
  100. u32 gtfifodbg;
  101. gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
  102. if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
  103. __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
  104. }
/* Gen6 forcewake put: release the wake, then check for GT FIFO errors. */
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}
/*
 * Gen7 multithreaded forcewake put: clear the kernel's wake bit; the FIFO
 * debug check only applies to gen7 proper (not BDW, which also uses this
 * path).
 */
static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
/*
 * Wait for a free GT FIFO entry before issuing a posted write.
 *
 * Returns the number of timeouts hit (0 on success) and decrements the
 * cached free-entry count for the entry about to be consumed.
 */
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES everytime */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
			GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;

		/* Poll (up to 500 * 10us) for the free count to rise above
		 * the reserved watermark.  loop ends at -1 on timeout. */
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	/* Account for the entry the caller is about to use. */
	dev_priv->uncore.fifo_count--;

	return ret;
}
/* Clear both VLV forcewake request registers (render and media wells). */
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
/*
 * VLV forcewake get: perform the clear-ack / request / wait-ack handshake
 * independently for each engine requested in the fw_engine bitmask.
 */
static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}
}
/*
 * VLV forcewake put: release each engine requested in fw_engine.  CHV
 * shares this path but has no GT FIFO, hence the fifodbg exception.
 */
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);

	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
  205. static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
  206. {
  207. unsigned long irqflags;
  208. spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  209. if (fw_engine & FORCEWAKE_RENDER &&
  210. dev_priv->uncore.fw_rendercount++ != 0)
  211. fw_engine &= ~FORCEWAKE_RENDER;
  212. if (fw_engine & FORCEWAKE_MEDIA &&
  213. dev_priv->uncore.fw_mediacount++ != 0)
  214. fw_engine &= ~FORCEWAKE_MEDIA;
  215. if (fw_engine)
  216. dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
  217. spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  218. }
  219. static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
  220. {
  221. unsigned long irqflags;
  222. spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  223. if (fw_engine & FORCEWAKE_RENDER) {
  224. WARN_ON(!dev_priv->uncore.fw_rendercount);
  225. if (--dev_priv->uncore.fw_rendercount != 0)
  226. fw_engine &= ~FORCEWAKE_RENDER;
  227. }
  228. if (fw_engine & FORCEWAKE_MEDIA) {
  229. WARN_ON(!dev_priv->uncore.fw_mediacount);
  230. if (--dev_priv->uncore.fw_mediacount != 0)
  231. fw_engine &= ~FORCEWAKE_MEDIA;
  232. }
  233. if (fw_engine)
  234. dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
  235. spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  236. }
/* Clear all three gen9 forcewake request registers (render/media/blitter). */
static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));
}
/*
 * Gen9 forcewake get: run the clear-ack / request / wait-ack handshake for
 * each of the render, media and blitter wells requested in fw_engine.
 */
static void
__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_RENDER_GEN9) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_RENDER_GEN9) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_GEN9) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_GEN9) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Media to ack.\n");
	}

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_BLITTER_GEN9) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_BLITTER_GEN9) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
	}
}
/*
 * Gen9 forcewake put: clear the kernel wake bit of each well requested in
 * fw_engine.  No ack wait is required on release.
 */
static void
__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
}
  311. static void
  312. gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
  313. {
  314. unsigned long irqflags;
  315. spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  316. if (FORCEWAKE_RENDER & fw_engine) {
  317. if (dev_priv->uncore.fw_rendercount++ == 0)
  318. dev_priv->uncore.funcs.force_wake_get(dev_priv,
  319. FORCEWAKE_RENDER);
  320. }
  321. if (FORCEWAKE_MEDIA & fw_engine) {
  322. if (dev_priv->uncore.fw_mediacount++ == 0)
  323. dev_priv->uncore.funcs.force_wake_get(dev_priv,
  324. FORCEWAKE_MEDIA);
  325. }
  326. if (FORCEWAKE_BLITTER & fw_engine) {
  327. if (dev_priv->uncore.fw_blittercount++ == 0)
  328. dev_priv->uncore.funcs.force_wake_get(dev_priv,
  329. FORCEWAKE_BLITTER);
  330. }
  331. spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  332. }
  333. static void
  334. gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
  335. {
  336. unsigned long irqflags;
  337. spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  338. if (FORCEWAKE_RENDER & fw_engine) {
  339. WARN_ON(dev_priv->uncore.fw_rendercount == 0);
  340. if (--dev_priv->uncore.fw_rendercount == 0)
  341. dev_priv->uncore.funcs.force_wake_put(dev_priv,
  342. FORCEWAKE_RENDER);
  343. }
  344. if (FORCEWAKE_MEDIA & fw_engine) {
  345. WARN_ON(dev_priv->uncore.fw_mediacount == 0);
  346. if (--dev_priv->uncore.fw_mediacount == 0)
  347. dev_priv->uncore.funcs.force_wake_put(dev_priv,
  348. FORCEWAKE_MEDIA);
  349. }
  350. if (FORCEWAKE_BLITTER & fw_engine) {
  351. WARN_ON(dev_priv->uncore.fw_blittercount == 0);
  352. if (--dev_priv->uncore.fw_blittercount == 0)
  353. dev_priv->uncore.funcs.force_wake_put(dev_priv,
  354. FORCEWAKE_BLITTER);
  355. }
  356. spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  357. }
/*
 * Timer callback for the deferred forcewake release armed by
 * gen6_gt_force_wake_put(): drops the deferred reference and the runtime
 * PM reference taken alongside it.
 */
static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}
/*
 * Reset the hardware forcewake state (e.g. across GPU reset or resume),
 * optionally re-asserting any forcewake the software refcounts say is
 * still held.
 */
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* Flush a pending deferred release so the refcounts are settled. */
	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (IS_GEN9(dev))
		__gen9_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		/* Rebuild the engine mask from the per-platform refcounts. */
		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else if (IS_GEN9(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;

			if (dev_priv->uncore.fw_blittercount)
				fw |= FORCEWAKE_BLITTER;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		/* The FIFO free count is stale after a reset; re-read it. */
		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
 * Early uncore sanitize: clear stale unclaimed-access and GT FIFO error
 * state, detect eLLC and reset forcewake (optionally restoring it).
 */
static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}
/* Public early sanitize: sanitize the uncore, then clear pending GTT faults. */
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}
/* Disable RC6/powersave state that firmware may have left enabled. */
void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 *
 * Takes a runtime PM reference and forwards to the platform-specific
 * refcounted get (gen9 / VLV), or does the generic single-domain
 * refcounting itself.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev))
		return gen9_force_wake_get(dev_priv, fw_engine);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
 * see gen6_gt_force_wake_get()
 *
 * On the generic path the final reference is not dropped immediately:
 * the count is bumped back up and a pinned timer releases it one jiffy
 * later (and drops the runtime PM reference from the get).
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev)) {
		gen9_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		/* Keep the reference alive; the timer will drop it. */
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}
  502. void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
  503. {
  504. if (!dev_priv->uncore.funcs.force_wake_get)
  505. return;
  506. WARN_ON(dev_priv->uncore.forcewake_count > 0);
  507. }
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

/* True when reg lies in the half-open interval [start, end). */
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

/* Valleyview: register ranges powered by the render well. */
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

/* Valleyview: register ranges powered by the media well. */
#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

/* Cherryview: render-well register ranges. */
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xC000) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

/* Cherryview: media-well register ranges. */
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

/* Cherryview: ranges needing both render and media wells awake. */
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xC000, 0xC800) || \
	 REG_RANGE((reg), 0xF000, 0x10000) || \
	 REG_RANGE((reg), 0x14000, 0x14400) || \
	 REG_RANGE((reg), 0x22000, 0x24000))

/* Gen9: range needing no forcewake at all. */
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xC00, 0x2000)

/* Gen9: render-well register ranges. */
#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

/* Gen9: media-well register ranges. */
#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

/* Gen9: ranges needing both render and media wells awake. */
#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

/* Gen9: everything else below 0x40000 belongs to the blitter well. */
#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 &&\
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
/* Wake the chip from RC6 with a harmless dummy write (ILK workaround). */
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}
  574. static void
  575. hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
  576. bool before)
  577. {
  578. const char *op = read ? "reading" : "writing to";
  579. const char *when = before ? "before" : "after";
  580. if (!i915.mmio_debug)
  581. return;
  582. if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
  583. WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
  584. when, op, reg);
  585. __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
  586. }
  587. }
  588. static void
  589. hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
  590. {
  591. if (i915.mmio_debug)
  592. return;
  593. if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
  594. DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
  595. __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
  596. }
  597. }
/*
 * Common prologue for the generated read functions: declare the result,
 * assert the device is awake and take the uncore spinlock.
 */
#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

/* Common epilogue: drop the lock, trace the access, return the value. */
#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
/* Gen4: plain locked read, no forcewake needed. */
#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}
/* Gen5 (ILK): issue the RC6 wakeup dummy write before each read. */
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}
/*
 * Gen6+: take a temporary full forcewake around the access when the
 * register needs it and no forcewake reference is currently held;
 * unclaimed-access debugging brackets the read.
 */
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	REG_READ_FOOTER; \
}
/*
 * Valleyview reads: forcewake is split per engine.  Pick the engine
 * whose register range contains @reg (render or media) and take an
 * explicit reference around the read only if that engine's forcewake
 * count is currently zero, i.e. nothing else is holding it awake.
 * Registers outside both ranges are read with no forcewake.
 */
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}
/*
 * Cherryview reads: like vlv_read, but with an additional "common"
 * register range that requires BOTH render and media forcewake; for
 * that range each engine is OR-ed in independently when its count is
 * zero.
 */
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine |= FORCEWAKE_RENDER; \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine |= FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}
/*
 * Gen9: any register below 0x40000 needs forcewake, except those in
 * the always-on uncore range.
 */
#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
/*
 * Gen9 reads: registers that don't need forcewake are read directly.
 * Otherwise pick the engine(s) by register range -- render, media, or
 * the common range which needs both -- and fall back to the blitter
 * well for anything else below the forcewake boundary.  A reference
 * is only taken when the respective count is zero.
 */
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} else { \
		unsigned fwengine = 0; \
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} else { \
			if (dev_priv->uncore.fw_blittercount == 0) \
				fwengine = FORCEWAKE_BLITTER; \
		} \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	} \
	REG_READ_FOOTER; \
}
/*
 * Instantiate the 8/16/32/64-bit variants of each read-function family,
 * then drop the generator macros so they can't leak further.
 */
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER
/*
 * Shared prologue/epilogue for the generated mmio write functions.
 * Unlike the read path, the tracepoint fires before taking uncore.lock
 * (the value to write is already known).  Relies on "dev_priv", "reg",
 * "val" and "trace" being in scope in the generated function.
 */
#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
/* Gen4-and-earlier writes: plain locked raw mmio write. */
#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}
/*
 * Gen5 (Ironlake) writes: same ilk_dummy_write() workaround as the
 * gen5 read path, issued before the real write.
 */
#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}
/*
 * Gen6/7 writes: registers behind the forcewake boundary go through the
 * GT write FIFO, so wait for FIFO space first.  If the wait reported a
 * problem (__fifo_ret non-zero), check the FIFO debug state after the
 * write.
 */
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}
/*
 * Haswell writes: gen6 FIFO handling plus unclaimed-register checks --
 * per-access debug before and after the write, and the cheaper
 * one-shot detect pass at the end.
 */
#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}
/*
 * Registers treated as shadowed on gen8: gen8_write*() skips the
 * forcewake dance when writing any of these (see is_gen8_shadowed()).
 */
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};
  811. static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
  812. {
  813. int i;
  814. for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
  815. if (reg == gen8_shadowed_regs[i])
  816. return true;
  817. return false;
  818. }
/*
 * Gen8 (Broadwell) writes: non-shadowed registers below 0x40000 need
 * the GT awake; take a temporary FORCEWAKE_ALL reference only when
 * nobody else holds one.  Shadowed or high registers are written
 * directly.
 *
 * NOTE(review): forcewake_count is re-checked before the put; this
 * assumes the force_wake_get/put callbacks used here do not modify
 * forcewake_count themselves -- confirm against their definitions.
 */
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}
/*
 * Cherryview writes: shadowed registers (same gen8 list) need no
 * forcewake.  Otherwise select render, media, or both engines by
 * register range, taking a reference only when the respective count
 * is zero -- mirroring chv_read.
 */
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	unsigned fwengine = 0; \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_WRITE_FOOTER; \
}
/*
 * Registers treated as shadowed on gen9: gen9_write*() writes these
 * directly, without forcewake (see is_gen9_shadowed()).
 */
static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};
  878. static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
  879. {
  880. int i;
  881. for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
  882. if (reg == gen9_shadowed_regs[i])
  883. return true;
  884. return false;
  885. }
/*
 * Gen9 writes: shadowed registers and registers outside the forcewake
 * boundary are written directly.  Otherwise pick the engine(s) by
 * register range -- render, media, both for the common range, or the
 * blitter well as the fallback -- taking a reference only when the
 * respective count is zero.  Mirrors gen9_read.
 */
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	REG_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} else { \
		unsigned fwengine = 0; \
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} else { \
			if (dev_priv->uncore.fw_blittercount == 0) \
				fwengine = FORCEWAKE_BLITTER; \
		} \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      fwengine); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      fwengine); \
	} \
	REG_WRITE_FOOTER; \
}
/*
 * Instantiate the 8/16/32/64-bit variants of each write-function
 * family, then drop the generator macros.
 */
__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
/*
 * Helpers for intel_uncore_init(): wire up all four access widths of
 * one accessor family (e.g. gen6, chv) into the uncore vfunc table in
 * a single statement.
 */
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
/*
 * intel_uncore_init - set up the uncore accessors for this device.
 *
 * Arms the deferred forcewake-release timer, sanitizes early state,
 * selects the platform-appropriate forcewake get/put callbacks, and
 * finally installs the per-gen mmio read/write vfuncs.
 */
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	__intel_uncore_early_sanitize(dev, false);

	/* Pick the forcewake implementation by platform. */
	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}
	/* Gens < 6 have no forcewake; the vfuncs stay unset. */

	/* Install the mmio accessor family matching this gen. */
	switch (INTEL_INFO(dev)->gen) {
	default:
		WARN_ON(1);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			/* Broadwell: gen8 writes but the gen6 read path. */
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen4);
		ASSIGN_READ_MMIO_VFUNCS(gen4);
		break;
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
/*
 * intel_uncore_fini - tear down uncore state on driver unload.
 * Sanitize first, then drop any outstanding forcewake references.
 */
void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}
/* Build a gen bitmask covering gens l..h inclusive. */
#define GEN_RANGE(l, h) GENMASK(h, l)

/*
 * Registers userspace may read via i915_reg_read_ioctl(), with their
 * access width in bytes and the gens on which the read is permitted.
 */
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
  1081. int i915_reg_read_ioctl(struct drm_device *dev,
  1082. void *data, struct drm_file *file)
  1083. {
  1084. struct drm_i915_private *dev_priv = dev->dev_private;
  1085. struct drm_i915_reg_read *reg = data;
  1086. struct register_whitelist const *entry = whitelist;
  1087. int i, ret = 0;
  1088. for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
  1089. if (entry->offset == reg->offset &&
  1090. (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
  1091. break;
  1092. }
  1093. if (i == ARRAY_SIZE(whitelist))
  1094. return -EINVAL;
  1095. intel_runtime_pm_get(dev_priv);
  1096. switch (entry->size) {
  1097. case 8:
  1098. reg->val = I915_READ64(reg->offset);
  1099. break;
  1100. case 4:
  1101. reg->val = I915_READ(reg->offset);
  1102. break;
  1103. case 2:
  1104. reg->val = I915_READ16(reg->offset);
  1105. break;
  1106. case 1:
  1107. reg->val = I915_READ8(reg->offset);
  1108. break;
  1109. default:
  1110. WARN_ON(1);
  1111. ret = -EINVAL;
  1112. goto out;
  1113. }
  1114. out:
  1115. intel_runtime_pm_put(dev_priv);
  1116. return ret;
  1117. }
/*
 * ioctl: report GPU reset statistics for a context.
 *
 * Querying the default context requires CAP_SYS_ADMIN; the global
 * reset count is likewise only reported to CAP_SYS_ADMIN callers
 * (others get 0).  Per-context batch_active/batch_pending come from
 * the context's hang stats, read under struct_mutex.
 */
int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	/* No flags or pad bits are defined yet; reject non-zero values. */
	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
  1148. static int i915_reset_complete(struct drm_device *dev)
  1149. {
  1150. u8 gdrst;
  1151. pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
  1152. return (gdrst & GRDOM_RESET_STATUS) == 0;
  1153. }
/*
 * Full reset via the PCI config-space GDRST register (pre-g4x).
 * Returns 0 on success or a timeout error from wait_for().
 */
static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	/* Wait up to 500ms for the status bit to clear. */
	return wait_for(i915_reset_complete(dev), 500);
}
  1162. static int g4x_reset_complete(struct drm_device *dev)
  1163. {
  1164. u8 gdrst;
  1165. pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
  1166. return (gdrst & GRDOM_RESET_ENABLE) == 0;
  1167. }
/*
 * G33 reset: set GDRST enable and wait (up to 500ms) for hardware to
 * clear it; no explicit de-assert write is needed here.
 */
static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}
/*
 * G4X reset: reset the render and media domains in sequence via
 * GDRST, wrapping the media reset in the VCP clock-gating-disable
 * workaround, then de-assert GDRST.
 */
static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}
/*
 * Ironlake reset: uses the ILK_GDSR register (behind the MCHBAR
 * mirror) rather than PCI config space.  Resets render then media,
 * waiting up to 500ms for hardware to clear the enable bit after
 * each, then clears the register.
 */
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}
/*
 * Gen6+ reset: one full-domain reset through GEN6_GDRST using raw
 * accessors (the register is outside the GT power well -- see comment
 * below), then re-establish forcewake state, which the reset clobbers.
 */
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}
  1230. int intel_gpu_reset(struct drm_device *dev)
  1231. {
  1232. if (INTEL_INFO(dev)->gen >= 6)
  1233. return gen6_do_reset(dev);
  1234. else if (IS_GEN5(dev))
  1235. return ironlake_do_reset(dev);
  1236. else if (IS_G4X(dev))
  1237. return g4x_do_reset(dev);
  1238. else if (IS_G33(dev))
  1239. return g33_do_reset(dev);
  1240. else if (INTEL_INFO(dev)->gen >= 3)
  1241. return i915_do_reset(dev);
  1242. else
  1243. return -ENODEV;
  1244. }
  1245. void intel_uncore_check_errors(struct drm_device *dev)
  1246. {
  1247. struct drm_i915_private *dev_priv = dev->dev_private;
  1248. if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
  1249. (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
  1250. DRM_ERROR("Unclaimed register before interrupt\n");
  1251. __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
  1252. }
  1253. }