
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}
static void
vgpu_fw_domains_nop(struct drm_i915_private *dev_priv,
		    enum forcewake_domains fw_domains)
{
	/* The guest driver doesn't need to take care of forcewake. */
}
static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv = domain->i915;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0) {
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
		dev_priv->uncore.fw_domains_active &= ~domain->mask;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}
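
/*
 * Illustrative note (not upstream code): fw_domain_arm_timer() takes a
 * wake_count reference and schedules this timer roughly 1ms out, so a
 * forcewake reference taken automatically for a register access is only
 * held for about a millisecond before the domain may power down again:
 *
 *	__force_wake_auto()            wake_count 0 -> 1, domain woken
 *	... more register accesses ...
 *	intel_uncore_fw_release_timer  wake_count 1 -> 0, domain released
 */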
void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
				  bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}
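
/*
 * Worked example (illustrative comment, not upstream code): for a
 * hypothetical HSW_EDRAM_CAP value decoding to 2 banks, ways index 1
 * (8 ways) and sets index 2 (2 sets), gen9_edram_size() returns
 *
 *	2 * 8 * 2 * 1024 * 1024 = 32 MB
 *
 * i.e. the bank/way/set geometry multiplied out to a byte size.
 */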
static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
		info->has_decoupled_mmio = false;

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
				 bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains) {
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
		dev_priv->uncore.fw_domains_active |= fw_domains;
	}
}
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and the reference should subsequently be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all the domains to be kept awake, in which case @fw_domains would be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}
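
/*
 * A minimal usage sketch (illustrative only, not part of the driver):
 * hold the render forcewake domain explicitly across a pair of raw
 * register reads so the GT cannot power down between them. Assumes the
 * caller already holds a runtime-PM wakeref, as required by
 * intel_uncore_forcewake_get().
 */
#if 0
static void example_read_thread_status(struct drm_i915_private *dev_priv)
{
	u32 status;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
	status = I915_READ_FW(GEN6_GT_THREAD_STATUS_REG);
	/* ... more reads that must see an awake GT ... */
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
}
#endif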
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
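
/*
 * Worked example (illustrative comment, not upstream code): on gen6, a
 * read of offset 0x2030 (the render ring tail, below the 0x40000
 * cutoff) maps to FORCEWAKE_RENDER, while a read of a display register
 * at e.g. 0x70000 is above the cutoff and needs no forcewake at all.
 */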
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})
static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}

static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
	const struct intel_forcewake_range *ranges;
	unsigned int num_ranges;
	s32 prev;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	ranges = dev_priv->uncore.fw_domains_table;
	if (!ranges)
		return;

	num_ranges = dev_priv->uncore.fw_domains_table_entries;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		WARN_ON_ONCE(IS_GEN9(dev_priv) &&
			     (prev + 1) != (s32)ranges->start);
		WARN_ON_ONCE(prev >= (s32)ranges->start);
		prev = ranges->start;
		WARN_ON_ONCE(prev >= (s32)ranges->end);
		prev = ranges->end;
	}
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
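
/*
 * Worked example (illustrative comment, not upstream code): on
 * Valleyview, __fwtable_reg_read_fw_domains(0x12000) binary-searches
 * __vlv_fw_ranges, lands in the 0x12000-0x13fff entry and returns
 * FORCEWAKE_MEDIA; an offset such as 0x4000 falls in the gap between
 * ranges, so BSEARCH() finds no entry and no forcewake is taken.
 */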
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static void intel_shadow_table_check(void)
{
	const i915_reg_t *reg = gen8_shadowed_regs;
	s32 prev;
	u32 offset;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
		offset = i915_mmio_reg_offset(*reg);
		WARN_ON_ONCE(prev >= (s32)offset);
		prev = offset;
	}
}

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
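
/*
 * Worked example (illustrative comment, not upstream code): a write to
 * RING_TAIL(RENDER_RING_BASE) finds the register in gen8_shadowed_regs,
 * so __gen8_reg_write_fw_domains() yields 0 and the write is posted
 * without waking the GT; a write to a non-shadowed GT register below
 * 0x40000 yields FORCEWAKE_RENDER instead.
 */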
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

static const enum decoupled_power_domain fw2dpd_domain[] = {
	GEN9_DECOUPLED_PD_RENDER,
	GEN9_DECOUPLED_PD_BLITTER,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_MEDIA,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL
};

/*
 * Decoupled MMIO access for only 1 DWORD
 */
static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
					 u32 reg,
					 enum forcewake_domains fw_domain,
					 enum decoupled_ops operation)
{
	enum decoupled_power_domain dp_domain;
	u32 ctrl_reg_data = 0;

	dp_domain = fw2dpd_domain[fw_domain - 1];

	ctrl_reg_data |= reg;
	ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
	ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
	ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);

	if (wait_for_atomic((__raw_i915_read32(dev_priv,
			     GEN9_DECOUPLED_REG0_DW1) &
			     GEN9_DECOUPLED_DW1_GO) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Decoupled MMIO wait timed out\n");
}

static inline u32
__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
			     u32 reg,
			     enum forcewake_domains fw_domain)
{
	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_READ);

	return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
}

static inline void
__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
			    u32 reg, u32 data,
			    enum forcewake_domains fw_domain)
{
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);

	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_WRITE);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read
#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
	dev_priv->uncore.fw_domains_active |= fw_domains;
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}
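
/*
 * Illustrative note (not upstream code): __force_wake_auto() is the
 * heart of "automatic" forcewake. A gen6+ access first computes the
 * needed domains from the offset, wakes only those that are not already
 * active, and relies on the per-domain hrtimer armed in
 * fw_domain_arm_timer() to drop the reference about 1ms later, so a
 * burst of accesses pays the wake cost only once.
 */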
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen6_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
		       i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
		unsigned i; \
		u32 *ptr_data = (u32 *) &val; \
		for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
			*ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
								 offset, \
								 fw_engine); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __gen8_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
			i915_reg_t reg, u##x val, \
			bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
		__gen9_decoupled_mmio_write(dev_priv, \
					    offset, \
					    val, \
					    fw_engine); \
	else \
		__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
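
/*
 * Expansion example (illustrative comment, not upstream code):
 * ASSIGN_READ_MMIO_VFUNCS(fwtable) pastes the "fwtable" token, i.e.
 *
 *	dev_priv->uncore.funcs.mmio_readb = fwtable_read8;
 *	dev_priv->uncore.funcs.mmio_readw = fwtable_read16;
 *	dev_priv->uncore.funcs.mmio_readl = fwtable_read32;
 *	dev_priv->uncore.funcs.mmio_readq = fwtable_read64;
 *
 * wiring the generated accessors into the uncore vtable.
 */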
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = 1 << domain_id;

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen <= 5)
		return;

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access
		 * is not working. At this stage we don't know which flavour
		 * this ivb is, so it is better to reset also the gen6 fw
		 * registers before the ecobus check.
		 */
		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	if (intel_vgpu_active(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = vgpu_fw_domains_nop;
		dev_priv->uncore.funcs.force_wake_put = vgpu_fw_domains_nop;
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev_priv)->gen) {
	default:
	case 9:
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
		ASSIGN_READ_MMIO_VFUNCS(fwtable);
		if (HAS_DECOUPLED_MMIO(dev_priv)) {
			dev_priv->uncore.funcs.mmio_readl =
						gen9_decoupled_read32;
			dev_priv->uncore.funcs.mmio_readq =
						gen9_decoupled_read64;
			dev_priv->uncore.funcs.mmio_writel =
						gen9_decoupled_write32;
		}
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		ASSIGN_WRITE_MMIO_VFUNCS(gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	intel_fw_table_check(dev_priv);
	if (INTEL_GEN(dev_priv) >= 8)
		intel_shadow_table_check();

	i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}

#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};
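
/*
 * Worked example (illustrative comment, not upstream code):
 * GEN_RANGE(4, 9) expands to GENMASK(8, 3) = 0x1f8, i.e. bits 3
 * through 8 set, one bit per supported generation under the
 * bit-(gen - 1) encoding; this mask is matched against
 * INTEL_INFO(dev_priv)->gen_mask in i915_reg_read_ioctl() below.
 */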
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
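
	/*
	 * Worked example (illustrative comment): for the 8-byte
	 * RING_TIMESTAMP entry, a userspace offset equal to the register
	 * base gives size = 8 and a plain 64-bit read, while offset | 1
	 * gives size = 8 | 1 and selects the 2x32 read path below.
	 */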
	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static int g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}

static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
}
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
}
/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * requesting a full domain reset versus a reset of all available individual
 * engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}
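/*
 * Note (an addition; rationale assumed, not stated in the source): the domain
 * reset is believed to clobber the forcewake registers, which would be why
 * gen6_reset_engines() finishes with intel_uncore_forcewake_reset(..., true)
 * to restore the forcewake state held by the driver.
 */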
/**
 * intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const u32 mask,
			       const u32 value,
			       const unsigned long timeout_ms)
{
#define done ((I915_READ_FW(reg) & mask) == value)
	int ret = wait_for_us(done, 2);

	if (ret)
		ret = wait_for(done, timeout_ms);

	return ret;
#undef done
}
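/*
 * Design note (an addition, not from the original source): the wait above is
 * deliberately two-staged: a 2us busy-wait for registers that ack almost
 * immediately, then a fallback to the sleeping wait_for(), so fast acks avoid
 * scheduler overhead while slow acks do not burn the CPU.
 */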
/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    const u32 mask,
			    const u32 value,
			    const unsigned long timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	intel_uncore_forcewake_get(dev_priv, fw);
	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
	intel_uncore_forcewake_put(dev_priv, fw);
	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}
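/*
 * Note (added for clarity): unlike the _fw variant, intel_wait_for_register()
 * above only holds forcewake for the initial 2us spin; the slow path re-reads
 * the register via I915_READ_NOTRACE, which manages forcewake itself, so a
 * long wait does not keep the GT forced awake throughout.
 */

/*
 * Gen8+ engine reset below is a two-step handshake: request a reset via
 * RING_RESET_CTL and wait for the engine to report READY_TO_RESET before
 * performing the actual domain reset; if any engine fails to become ready,
 * the requests are withdrawn.
 */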
static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
}
typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
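/*
 * Note (added for clarity): the GuC soft reset reuses the gen6 GDRST flow;
 * forcewake is held and uncore.lock taken around gen6_hw_domain_reset() so
 * that the raw GEN6_GDRST write and the polling read cannot interleave with
 * other mmio traffic.
 */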
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}
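/*
 * Note (added for clarity): arming is one-shot by design: on the first
 * unclaimed access the function above bumps i915.mmio_debug, enabling the
 * verbose per-access checks, and decrements unclaimed_mmio_check so the
 * detector does not immediately re-arm itself.
 */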
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}
/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains that must be taken (for example with
 * intel_uncore_forcewake_get()) for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * the caller to do FIFO management on its own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}
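/*
 * Usage sketch (an illustrative addition, mirroring intel_wait_for_register()
 * earlier in this file): raw accessors must be bracketed by the forcewake
 * domains reported for the register:
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
 *	u32 val;
 *
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	val = I915_READ_FW(reg);
 *	intel_uncore_forcewake_put(dev_priv, fw);
 */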