intel_uncore.c
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES everytime */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
			GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	if (!IS_CHERRYVIEW(dev_priv->dev))
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	del_timer_sync(&dev_priv->uncore.force_wake_timer);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	} else {
		dev_priv->uncore.forcewake_count = 0;
		dev_priv->uncore.fw_rendercount = 0;
		dev_priv->uncore.fw_mediacount = 0;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, false);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
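/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * file): a caller that must keep the GT awake across several MMIO accesses
 * would bracket the sequence as described in the comment above:
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	... series of I915_READ()/I915_WRITE() accesses ...
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 *
 * Note that the put side below does not drop the reference immediately; it
 * re-arms force_wake_timer for the next jiffy so that back-to-back sequences
 * do not thrash the forcewake handshake.
 */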
/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xC000) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xC000, 0xC800) || \
	 REG_RANGE((reg), 0xF000, 0x10000) || \
	 REG_RANGE((reg), 0x14000, 0x14400) || \
	 REG_RANGE((reg), 0x22000, 0x24000))
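/*
 * Worked example (added): on VLV, the render ring's tail register,
 * RING_TAIL(RENDER_RING_BASE) == 0x2000 + 0x30 == 0x2030, falls in
 * REG_RANGE(0x2000, 0x4000), so FORCEWAKE_VLV_RENDER_RANGE_OFFSET() is true
 * and only the render well is woken for the access; offsets matching none of
 * the ranges are accessed without taking any per-engine forcewake.
 */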
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine |= FORCEWAKE_RENDER; \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine |= FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	REG_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}
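/*
 * Note (added): in the gen8 write path below, registers listed in
 * gen8_shadowed_regs[] are written directly, while any other register below
 * 0x40000 gets an implicit FORCEWAKE_ALL get/put around the write when no
 * forcewake reference is already held (see __gen8_write()).
 */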
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	REG_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	unsigned fwengine = 0; \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_WRITE_FOOTER; \
}

__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	intel_uncore_early_sanitize(dev);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		if (IS_CHERRYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = chv_write8;
			dev_priv->uncore.funcs.mmio_writew = chv_write16;
			dev_priv->uncore.funcs.mmio_writel = chv_write32;
			dev_priv->uncore.funcs.mmio_writeq = chv_write64;
			dev_priv->uncore.funcs.mmio_readb = chv_read8;
			dev_priv->uncore.funcs.mmio_readw = chv_read16;
			dev_priv->uncore.funcs.mmio_readl = chv_read32;
			dev_priv->uncore.funcs.mmio_readq = chv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
			dev_priv->uncore.funcs.mmio_writew = gen8_write16;
			dev_priv->uncore.funcs.mmio_writel = gen8_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};
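/*
 * Worked example (added): GEN_RANGE(4, 8) expands to GENMASK(8, 4), i.e.
 * bits 4..8 set (0x1f0), so the RING_TIMESTAMP entry above is readable via
 * i915_reg_read_ioctl() on gen4 through gen8; the (1 << gen) test below
 * checks the running hardware's generation against this bitmask.
 */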
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */
	return -ENODEV;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4:
		if (IS_G4X(dev))
			return g4x_do_reset(dev);
		else
			return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}