intel_uncore.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
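/*
 * Note: the gen6 and multi-threaded (MT) forcewake variants above follow the
 * same handshake: wait for any previous ack to clear, write the wake request,
 * issue a posting read from the same cacheline, wait for the ack to assert,
 * and finally apply WaRsForcewakeWaitTC0 by polling for a GT thread in C0.
 */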
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
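/*
 * The GT wake FIFO handled below keeps a few entries in reserve: before a
 * posted write the driver must see more than GT_FIFO_NUM_RESERVED_ENTRIES
 * free, spinning (up to 500 iterations of 10us) for space, and then accounts
 * for the entry the upcoming write will consume.
 */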
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
			GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);

	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));
}

static void
__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_RENDER_GEN9) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_RENDER_GEN9) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_GEN9) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_GEN9) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Media to ack.\n");
	}

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_BLITTER_GEN9) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_BLITTER_GEN9) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
	}
}

static void
__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
}

static void
gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (FORCEWAKE_RENDER & fw_engine) {
		if (dev_priv->uncore.fw_rendercount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							      FORCEWAKE_RENDER);
	}

	if (FORCEWAKE_MEDIA & fw_engine) {
		if (dev_priv->uncore.fw_mediacount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							      FORCEWAKE_MEDIA);
	}

	if (FORCEWAKE_BLITTER & fw_engine) {
		if (dev_priv->uncore.fw_blittercount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							      FORCEWAKE_BLITTER);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void
gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (FORCEWAKE_RENDER & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_rendercount == 0);
		if (--dev_priv->uncore.fw_rendercount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							      FORCEWAKE_RENDER);
	}

	if (FORCEWAKE_MEDIA & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_mediacount == 0);
		if (--dev_priv->uncore.fw_mediacount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							      FORCEWAKE_MEDIA);
	}

	if (FORCEWAKE_BLITTER & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_blittercount == 0);
		if (--dev_priv->uncore.fw_blittercount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							      FORCEWAKE_BLITTER);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (IS_GEN9(dev))
		__gen9_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else if (IS_GEN9(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;

			if (dev_priv->uncore.fw_blittercount)
				fw |= FORCEWAKE_BLITTER;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev))
		return gen9_force_wake_get(dev_priv, fw_engine);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev)) {
		gen9_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}
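/*
 * Illustrative sketch (not taken from this file): a caller that needs the GT
 * to stay awake across a whole sequence of accesses, rather than per
 * register, would bracket the sequence like this (assuming a valid dev_priv):
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	... several I915_READ()/I915_WRITE() accesses ...
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 *
 * The individual mmio accessors defined below take and release forcewake on
 * their own, so this is only needed when a sequence must not see the GT power
 * down part-way through.
 */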
void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xC000) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xC000, 0xC800) || \
	 REG_RANGE((reg), 0xF000, 0x10000) || \
	 REG_RANGE((reg), 0x14000, 0x14400) || \
	 REG_RANGE((reg), 0x22000, 0x24000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
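/*
 * For example, a register at offset 0x12400 falls inside the 0x12000-0x14000
 * window above, so it is classified as a media-domain register on VLV, CHV
 * and gen9 alike, while an offset of 0x9500 hits the gen9 "common" range and
 * the accessors below take both render and media forcewake for it.
 */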
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	if (i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
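/*
 * Note on the two helpers above: with the i915.mmio_debug module parameter
 * set, hsw_unclaimed_reg_debug() checks FPGA_DBG around every access and can
 * name the exact register that was touched while unclaimed; with it unset,
 * hsw_unclaimed_reg_detect() only reports after the fact and points the user
 * at i915.mmio_debug=1.
 */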
#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine |= FORCEWAKE_RENDER; \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine |= FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} else { \
		unsigned fwengine = 0; \
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} else { \
			if (dev_priv->uncore.fw_blittercount == 0) \
				fwengine = FORCEWAKE_BLITTER; \
		} \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	} \
	REG_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER
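/*
 * Illustrative expansion (not part of this file): with the macros above,
 * __gen6_read(32) generates roughly the following function, which is what
 * ends up behind dev_priv->uncore.funcs.mmio_readl on gen6/gen7:
 *
 *	static u32
 *	gen6_read32(struct drm_i915_private *dev_priv, off_t reg, bool trace)
 *	{
 *		unsigned long irqflags;
 *		u32 val = 0;
 *
 *		assert_device_not_suspended(dev_priv);
 *		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 *		hsw_unclaimed_reg_debug(dev_priv, reg, true, true);
 *		if (dev_priv->uncore.forcewake_count == 0 &&
 *		    NEEDS_FORCE_WAKE(dev_priv, reg)) {
 *			dev_priv->uncore.funcs.force_wake_get(dev_priv,
 *							      FORCEWAKE_ALL);
 *			val = __raw_i915_read32(dev_priv, reg);
 *			dev_priv->uncore.funcs.force_wake_put(dev_priv,
 *							      FORCEWAKE_ALL);
 *		} else {
 *			val = __raw_i915_read32(dev_priv, reg);
 *		}
 *		hsw_unclaimed_reg_debug(dev_priv, reg, true, false);
 *		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 */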
#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}
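/*
 * Shadowed registers are the ones gen8_write/gen9_write below skip the
 * forcewake dance for: writes to them are posted without first waking the
 * GT, which is presumably why frequently written registers such as the ring
 * TAIL pointers appear on the list.
 */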
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	unsigned fwengine = 0; \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	REG_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} else { \
		unsigned fwengine = 0; \
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} else { \
			if (dev_priv->uncore.fw_blittercount == 0) \
				fwengine = FORCEWAKE_BLITTER; \
		} \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      fwengine); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      fwengine); \
	} \
	REG_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	__intel_uncore_early_sanitize(dev, false);

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		WARN_ON(1);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen4);
		ASSIGN_READ_MMIO_VFUNCS(gen4);
		break;
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
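/*
 * As an example of the bitmask encoding above: GEN_RANGE(4, 9) expands to
 * GENMASK(9, 4) == 0x3f0, so the (1 << gen) & gen_bitmask test in
 * i915_reg_read_ioctl() below accepts the whitelisted register on gen 4
 * through gen 9.
 */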
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_G33(dev))
		return g33_do_reset(dev);
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset(dev);
	else
		return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}