intel_uncore.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
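
/*
 * Note: the __raw_* accessors above go straight to the MMIO BAR; they take no
 * locks, do no forcewake handling and emit no tracepoints.  They are intended
 * for the uncore code itself.  Everything else should go through the
 * mmio_read/mmio_write hooks that intel_uncore_init() installs below.
 */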

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
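
/*
 * All the force_wake_get variants follow the same handshake: wait for any
 * previous ack to clear, write the wake request, then wait for the hardware
 * to assert the ack again.  The multi-threaded (MT) and VLV/CHV versions
 * below only differ in using masked per-client bits and, on VLV/CHV, separate
 * render and media power wells.
 */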

static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}
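
/*
 * FORCEWAKE_MT is a masked register: the upper 16 bits of a write select
 * which of the lower 16 bits actually take effect, which is what the
 * _MASKED_BIT_ENABLE() and _MASKED_BIT_DISABLE() helpers encode.  That lets
 * several independent clients flip their own wake bit without a
 * read-modify-write race on the shared register.
 */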

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
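
/*
 * gen6_write*() and hsw_write*() below call __gen6_gt_wait_for_fifo() before
 * any write that needs the GT awake: the GT wake FIFO only has a limited
 * number of entries, so we keep a cached free count and, once it drops to the
 * reserved threshold, poll GTFIFOCTL (up to 500 x 10us) until entries drain.
 * A non-zero return value just makes the caller check GTFIFODBG afterwards.
 */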

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	if (!IS_CHERRYVIEW(dev_priv->dev))
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}
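
/*
 * gen6_force_wake_timer() is the deferred half of gen6_gt_force_wake_put():
 * rather than dropping forcewake immediately, the put path keeps one extra
 * reference and arms this timer for the next jiffy, so that back-to-back
 * get/put sequences do not bounce the hardware in and out of forcewake.
 */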

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}
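
/*
 * Illustrative caller pattern (not code in this file): per the comment above
 * gen6_gt_force_wake_get(), a register sequence that must not see the GT
 * power down in the middle is bracketed like
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	... several I915_READ()/I915_WRITE() accesses ...
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 *
 * with the actual wake/sleep requests issued only on the 0<->1 refcount
 * transitions.
 */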

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xC000) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xC000, 0xC800) || \
	 REG_RANGE((reg), 0xF000, 0x10000) || \
	 REG_RANGE((reg), 0x14000, 0x14400) || \
	 REG_RANGE((reg), 0x22000, 0x24000))
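
/*
 * The REG_RANGE() tables above map an MMIO offset to the forcewake well(s)
 * it needs on VLV/CHV.  For example, an offset of 0x2030 falls in the
 * 0x2000-0x4000 render range on both platforms, 0x12050 falls in the
 * 0x12000-0x14000 media range, and anything in a CHV "common" range makes
 * chv_read/chv_write grab both the render and the media well.
 */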

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	if (i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed register detected. Please use i915.mmio_debug=1 to debug this problem.");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
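
/*
 * These two helpers implement the i915.mmio_debug split: with the module
 * parameter set, hsw_unclaimed_reg_debug() checks FPGA_DBG before and after
 * the access and WARNs with the offending register; with it unset, unclaimed
 * accesses are only noticed after a write by hsw_unclaimed_reg_detect(),
 * which prints a hint to turn the parameter on.
 */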

#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine |= FORCEWAKE_RENDER; \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine |= FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER
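
/*
 * Each __xxx_read(x) invocation above stamps out an xxx_read{8,16,32,64}()
 * function with locking, tracing and any forcewake handling baked in;
 * intel_uncore_init() below picks the family that matches the platform and
 * installs it in dev_priv->uncore.funcs.  The write side that follows is
 * generated the same way.
 */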

#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}
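
/*
 * Registers in gen8_shadowed_regs[] are shadowed by the hardware and can be
 * written without waking the GT, so gen8_write*() and chv_write*() skip the
 * forcewake dance for them and only wake the relevant well(s) for ordinary
 * registers below 0x40000.
 */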

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	unsigned fwengine = 0; \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_WRITE_FOOTER; \
}

__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	intel_uncore_early_sanitize(dev, false);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		if (IS_CHERRYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = chv_write8;
			dev_priv->uncore.funcs.mmio_writew = chv_write16;
			dev_priv->uncore.funcs.mmio_writel = chv_write32;
			dev_priv->uncore.funcs.mmio_writeq = chv_write64;
			dev_priv->uncore.funcs.mmio_readb = chv_read8;
			dev_priv->uncore.funcs.mmio_readw = chv_read16;
			dev_priv->uncore.funcs.mmio_readl = chv_read32;
			dev_priv->uncore.funcs.mmio_readq = chv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
			dev_priv->uncore.funcs.mmio_writew = gen8_write16;
			dev_priv->uncore.funcs.mmio_writel = gen8_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};
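
/*
 * Only offsets listed in whitelist[] may be read via i915_reg_read_ioctl()
 * below, and only on the generations selected by gen_bitmask; at the moment
 * that is just the 64-bit render ring timestamp.
 */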

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */
	return -ENODEV;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_GEN4(dev))
		return i965_do_reset(dev);
	else
		return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}