intel_uncore.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
        "render",
        "blitter",
        "media",
};
const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
        BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
                return forcewake_domain_names[id];

        WARN_ON(id);

        return "unknown";
}
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
        WARN_ON(!i915_mmio_reg_valid(d->reg_set));
        __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
        d->wake_count++;
        hrtimer_start_range_ns(&d->timer,
                               NSEC_PER_MSEC,
                               NSEC_PER_MSEC,
                               HRTIMER_MODE_REL);
}
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
        /* something from same cacheline, but not from the set register */
        if (i915_mmio_reg_valid(d->reg_post))
                __raw_posting_read(d->i915, d->reg_post);
}
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;

        for_each_fw_domain_masked(d, fw_domains, dev_priv) {
                fw_domain_wait_ack_clear(d);
                fw_domain_get(d);
        }

        for_each_fw_domain_masked(d, fw_domains, dev_priv)
                fw_domain_wait_ack(d);

        dev_priv->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;

        for_each_fw_domain_masked(d, fw_domains, dev_priv) {
                fw_domain_put(d);
                fw_domain_posting_read(d);
        }

        dev_priv->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
        struct intel_uncore_forcewake_domain *d;

        /* No need to do for all, just do for first found */
        for_each_fw_domain(d, dev_priv) {
                fw_domain_posting_read(d);
                break;
        }
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;

        if (dev_priv->uncore.fw_domains == 0)
                return;

        for_each_fw_domain_masked(d, fw_domains, dev_priv)
                fw_domain_reset(d);

        fw_domains_posting_read(dev_priv);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
        /* w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
                                GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
                                              enum forcewake_domains fw_domains)
{
        fw_domains_get(dev_priv, fw_domains);

        /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
        u32 gtfifodbg;

        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
        if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
                __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
                                     enum forcewake_domains fw_domains)
{
        fw_domains_put(dev_priv, fw_domains);
        gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
        u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

        return count & GT_FIFO_FREE_ENTRIES_MASK;
}
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        /* On VLV, the FIFO is shared by both SW and HW, so we need to
         * read FREE_ENTRIES every time.
         */
        if (IS_VALLEYVIEW(dev_priv))
                dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = fifo_free_entries(dev_priv);

                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
                        fifo = fifo_free_entries(dev_priv);
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
                dev_priv->uncore.fifo_count = fifo;
        }
        dev_priv->uncore.fifo_count--;

        return ret;
}
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
        struct intel_uncore_forcewake_domain *domain =
                container_of(timer, struct intel_uncore_forcewake_domain, timer);
        struct drm_i915_private *dev_priv = domain->i915;
        unsigned long irqflags;

        assert_rpm_device_not_suspended(dev_priv);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (WARN_ON(domain->wake_count == 0))
                domain->wake_count++;

        if (--domain->wake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return HRTIMER_NORESTART;
}
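
/*
 * Editor's note: a sketch of the auto-release flow built from the helpers
 * above. Each automatic register access arms a ~1ms timer via
 * fw_domain_arm_timer() (wake_count++); when the timer fires,
 * intel_uncore_fw_release_timer() drops that reference and, once
 * wake_count reaches zero, calls force_wake_put() so the domain may power
 * back down. Bursts of MMIO traffic thus share one ack handshake instead
 * of paying for it per access.
 */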
void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
                                  bool restore)
{
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        int retry_count = 100;
        enum forcewake_domains fw, active_domains;

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly. Wait until all pending
         * timers are run before holding.
         */
        while (1) {
                active_domains = 0;

                for_each_fw_domain(domain, dev_priv) {
                        if (hrtimer_cancel(&domain->timer) == 0)
                                continue;

                        intel_uncore_fw_release_timer(&domain->timer);
                }

                spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

                for_each_fw_domain(domain, dev_priv) {
                        if (hrtimer_active(&domain->timer))
                                active_domains |= domain->mask;
                }

                if (active_domains == 0)
                        break;

                if (--retry_count == 0) {
                        DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
                        break;
                }

                spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
                cond_resched();
        }

        WARN_ON(active_domains);

        fw = dev_priv->uncore.fw_domains_active;
        if (fw)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

        fw_domains_reset(dev_priv, FORCEWAKE_ALL);

        if (restore) { /* If reset with a user forcewake, try to restore */
                if (fw)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

                if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
                        dev_priv->uncore.fifo_count =
                                fifo_free_entries(dev_priv);
        }

        if (!restore)
                assert_forcewakes_inactive(dev_priv);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
        const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
        const unsigned int sets[4] = { 1, 1, 2, 2 };
        const u32 cap = dev_priv->edram_cap;

        return EDRAM_NUM_BANKS(cap) *
               ways[EDRAM_WAYS_IDX(cap)] *
               sets[EDRAM_SETS_IDX(cap)] *
               1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
        if (!HAS_EDRAM(dev_priv))
                return 0;

        /* The capability bits needed for the size calculation are not
         * there pre-gen9, so always return 128MB on those platforms.
         */
        if (INTEL_GEN(dev_priv) < 9)
                return 128 * 1024 * 1024;

        return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
        if (IS_HASWELL(dev_priv) ||
            IS_BROADWELL(dev_priv) ||
            INTEL_GEN(dev_priv) >= 9) {
                dev_priv->edram_cap = __raw_i915_read32(dev_priv,
                                                        HSW_EDRAM_CAP);

                /* NB: We can't write IDICR yet because we do not have gt funcs
                 * set up */
        } else {
                dev_priv->edram_cap = 0;
        }

        if (HAS_EDRAM(dev_priv))
                DRM_INFO("Found %lluMB of eDRAM\n",
                         intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}
static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
        u32 dbg;

        dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
        if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
                return false;

        __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
        u32 cer;

        cer = __raw_i915_read32(dev_priv, CLAIM_ER);
        if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
                return false;

        __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

        return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
        if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
                return fpga_check_for_unclaimed_mmio(dev_priv);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return vlv_check_for_unclaimed_mmio(dev_priv);

        return false;
}
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
                                          bool restore_forcewake)
{
        struct intel_device_info *info = mkwrite_device_info(dev_priv);

        /* clear out unclaimed reg detection bit */
        if (check_for_unclaimed_mmio(dev_priv))
                DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

        /* clear out old GT FIFO errors */
        if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));

        /* WaDisableShadowRegForCpd:chv */
        if (IS_CHERRYVIEW(dev_priv)) {
                __raw_i915_write32(dev_priv, GTFIFOCTL,
                                   __raw_i915_read32(dev_priv, GTFIFOCTL) |
                                   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
                                   GT_FIFO_CTL_RC6_POLICY_STALL);
        }

        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
                info->has_decoupled_mmio = false;

        intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
                                 bool restore_forcewake)
{
        __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
        i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
        i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_sanitize_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                         enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;

        fw_domains &= dev_priv->uncore.fw_domains;

        for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
                if (domain->wake_count++)
                        fw_domains &= ~domain->mask;
        }

        if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to grab the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        __intel_uncore_forcewake_get(dev_priv, fw_domains);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
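
/*
 * Editor's note: a minimal usage sketch for the get/put pair, assuming a
 * caller that already holds a runtime-pm wakeref; the register sequence
 * in the middle is purely illustrative:
 *
 *      intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *      ... multi-register sequence that must not race against RC6 ...
 *      intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 *
 * Plain I915_READ()/I915_WRITE() calls do not need this; they take
 * forcewake automatically per register range.
 */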
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains fw_domains)
{
        assert_spin_locked(&dev_priv->uncore.lock);

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        __intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                         enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;

        fw_domains &= dev_priv->uncore.fw_domains;

        for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
                if (WARN_ON(domain->wake_count == 0))
                        continue;

                if (--domain->wake_count)
                        continue;

                fw_domain_arm_timer(domain);
        }
}
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        __intel_uncore_forcewake_put(dev_priv, fw_domains);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains fw_domains)
{
        assert_spin_locked(&dev_priv->uncore.lock);

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        __intel_uncore_forcewake_put(dev_priv, fw_domains);
}
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        WARN_ON(dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd; \
        if (NEEDS_FORCE_WAKE(offset)) \
                __fwd = FORCEWAKE_RENDER; \
        else \
                __fwd = 0; \
        __fwd; \
})
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
        if (offset < entry->start)
                return -1;
        else if (offset > entry->end)
                return 1;
        else
                return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
        unsigned int start__ = 0, end__ = (num); \
        typeof(base) result__ = NULL; \
        while (start__ < end__) { \
                unsigned int mid__ = start__ + (end__ - start__) / 2; \
                int ret__ = (cmp)((key), (base) + mid__); \
                if (ret__ < 0) { \
                        end__ = mid__; \
                } else if (ret__ > 0) { \
                        start__ = mid__ + 1; \
                } else { \
                        result__ = (base) + mid__; \
                        break; \
                } \
        } \
        result__; \
})
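
/*
 * Editor's note: BSEARCH() evaluates to a pointer to the matching element,
 * or NULL on a miss, so callers can use the result directly in boolean
 * context; find_fw_domain() below and is_gen8_shadowed() further down are
 * the two instantiations in this file.
 */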
static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
        const struct intel_forcewake_range *entry;

        entry = BSEARCH(offset,
                        dev_priv->uncore.fw_domains_table,
                        dev_priv->uncore.fw_domains_table_entries,
                        fw_range_cmp);

        if (!entry)
                return 0;

        WARN(entry->domains & ~dev_priv->uncore.fw_domains,
             "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
             entry->domains & ~dev_priv->uncore.fw_domains, offset);

        return entry->domains;
}

static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
        const struct intel_forcewake_range *ranges;
        unsigned int num_ranges;
        s32 prev;
        unsigned int i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
                return;

        ranges = dev_priv->uncore.fw_domains_table;
        if (!ranges)
                return;

        num_ranges = dev_priv->uncore.fw_domains_table_entries;

        for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
                WARN_ON_ONCE(IS_GEN9(dev_priv) &&
                             (prev + 1) != (s32)ranges->start);
                WARN_ON_ONCE(prev >= (s32)ranges->start);
                prev = ranges->start;
                WARN_ON_ONCE(prev >= (s32)ranges->end);
                prev = ranges->end;
        }
}
#define GEN_FW_RANGE(s, e, d) \
        { .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
        (IS_GEN9(dev_priv) || \
         IS_CHERRYVIEW(dev_priv) || \
         IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
#define __fwtable_reg_read_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        if (NEEDS_FORCE_WAKE((offset))) \
                __fwd = find_fw_domain(dev_priv, offset); \
        __fwd; \
})
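
/*
 * Editor's note: a worked example of the lookup above, assuming the VLV
 * table. An offset such as 0x2030 (the render ring's RING_TAIL) passes
 * NEEDS_FORCE_WAKE() (it is below 0x40000) and the binary search lands in
 * the 0x2000-0x3fff entry, so the access takes FORCEWAKE_RENDER. An offset
 * at or above 0x40000, or one that falls in a gap between ranges (e.g.
 * 0x4000 on VLV), resolves to 0 and is performed without any forcewake.
 */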
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),    /* 0x2000 (base) */
        GEN6_RPNSWREQ,                  /* 0xA008 */
        GEN6_RC_VIDEO_FREQ,             /* 0xA00C */
        RING_TAIL(GEN6_BSD_RING_BASE),  /* 0x12000 (base) */
        RING_TAIL(VEBOX_RING_BASE),     /* 0x1a000 (base) */
        RING_TAIL(BLT_RING_BASE),       /* 0x22000 (base) */
        /* TODO: Other registers are not yet used */
};

static void intel_shadow_table_check(void)
{
        const i915_reg_t *reg = gen8_shadowed_regs;
        s32 prev;
        u32 offset;
        unsigned int i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
                return;

        for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
                offset = i915_mmio_reg_offset(*reg);
                WARN_ON_ONCE(prev >= (s32)offset);
                prev = offset;
        }
}

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
        u32 offset = i915_mmio_reg_offset(*reg);

        if (key < offset)
                return -1;
        else if (key > offset)
                return 1;
        else
                return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
        const i915_reg_t *regs = gen8_shadowed_regs;

        return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
                       mmio_reg_cmp);
}
#define __gen8_reg_write_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd; \
        if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
                __fwd = FORCEWAKE_RENDER; \
        else \
                __fwd = 0; \
        __fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
        GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
        enum forcewake_domains __fwd = 0; \
        if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
                __fwd = find_fw_domain(dev_priv, offset); \
        __fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
        GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
        GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
        GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
        GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
        GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
         * the chip from rc6 before touching it for real. MI_MODE is masked,
         * hence harmless to write 0 into. */
        __raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
                      const i915_reg_t reg,
                      const bool read,
                      const bool before)
{
        if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
                 "Unclaimed %s register 0x%x\n",
                 read ? "read from" : "write to",
                 i915_mmio_reg_offset(reg)))
                i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
                    const i915_reg_t reg,
                    const bool read,
                    const bool before)
{
        if (likely(!i915.mmio_debug))
                return;

        __unclaimed_reg_debug(dev_priv, reg, read, before);
}
static const enum decoupled_power_domain fw2dpd_domain[] = {
        GEN9_DECOUPLED_PD_RENDER,
        GEN9_DECOUPLED_PD_BLITTER,
        GEN9_DECOUPLED_PD_ALL,
        GEN9_DECOUPLED_PD_MEDIA,
        GEN9_DECOUPLED_PD_ALL,
        GEN9_DECOUPLED_PD_ALL,
        GEN9_DECOUPLED_PD_ALL
};

/*
 * Decoupled MMIO access for only 1 DWORD
 */
static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
                                         u32 reg,
                                         enum forcewake_domains fw_domain,
                                         enum decoupled_ops operation)
{
        enum decoupled_power_domain dp_domain;
        u32 ctrl_reg_data = 0;

        dp_domain = fw2dpd_domain[fw_domain - 1];

        ctrl_reg_data |= reg;
        ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
        ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
        ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
        __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);

        if (wait_for_atomic((__raw_i915_read32(dev_priv,
                             GEN9_DECOUPLED_REG0_DW1) &
                             GEN9_DECOUPLED_DW1_GO) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Decoupled MMIO wait timed out\n");
}

static inline u32
__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
                             u32 reg,
                             enum forcewake_domains fw_domain)
{
        __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
                                     GEN9_DECOUPLED_OP_READ);

        return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
}

static inline void
__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
                            u32 reg, u32 data,
                            enum forcewake_domains fw_domain)
{
        __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);

        __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
                                     GEN9_DECOUPLED_OP_WRITE);
}
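
/*
 * Editor's note: the decoupled path above replaces a direct MMIO access
 * with a small doorbell protocol: program the target offset, operation and
 * power domain into GEN9_DECOUPLED_REG0_DW1, set the GO bit, then poll for
 * the hardware to clear it; the data itself travels through
 * GEN9_DECOUPLED_REG0_DW0. This lets an access reach a powered-down domain
 * without the driver taking forcewake explicitly.
 */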
#define GEN2_READ_HEADER(x) \
        u##x val = 0; \
        assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        ilk_dummy_write(dev_priv); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read
#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
#define GEN6_READ_HEADER(x) \
        u32 offset = i915_mmio_reg_offset(reg); \
        unsigned long irqflags; \
        u##x val = 0; \
        assert_rpm_wakelock_held(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
        unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
        unclaimed_reg_debug(dev_priv, reg, true, false); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
                                        enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;

        for_each_fw_domain_masked(domain, fw_domains, dev_priv)
                fw_domain_arm_timer(domain);

        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
                                     enum forcewake_domains fw_domains)
{
        if (WARN_ON(!fw_domains))
                return;

        /* Turn on all requested but inactive supported forcewake domains. */
        fw_domains &= dev_priv->uncore.fw_domains;
        fw_domains &= ~dev_priv->uncore.fw_domains_active;

        if (fw_domains)
                ___force_wake_auto(dev_priv, fw_domains);
}
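
/*
 * Editor's note: __force_wake_auto() is the hot-path entry into the
 * auto-release flow sketched near intel_uncore_fw_release_timer(): domains
 * that are already active cost nothing, while each newly woken domain gets
 * a reference that the ~1ms release timer, not the caller, will drop. The
 * readers/writers below call it with the uncore spinlock held, so the mask
 * checks and the wake are atomic with the access itself.
 */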
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        fw_engine = __gen6_reg_read_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        fw_engine = __fwtable_reg_read_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
                       i915_reg_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        fw_engine = __fwtable_reg_read_fw_domains(offset); \
        if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
                unsigned i; \
                u32 *ptr_data = (u32 *) &val; \
                for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
                        *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
                                                                 offset, \
                                                                 fw_engine); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
#define VGPU_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
        assert_rpm_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        VGPU_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER
#define GEN2_WRITE_HEADER \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
#define GEN6_WRITE_HEADER \
        u32 offset = i915_mmio_reg_offset(reg); \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_rpm_wakelock_held(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
        unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
        unclaimed_reg_debug(dev_priv, reg, false, false); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        GEN6_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE(offset)) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        GEN6_WRITE_FOOTER; \
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        fw_engine = __gen8_reg_write_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        fw_engine = __fwtable_reg_write_fw_domains(offset); \
        if (fw_engine) \
                __force_wake_auto(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
                        i915_reg_t reg, u##x val, \
                        bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        fw_engine = __fwtable_reg_write_fw_domains(offset); \
        if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
                __gen9_decoupled_mmio_write(dev_priv, \
                                            offset, \
                                            val, \
                                            fw_engine); \
        else \
                __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
#define VGPU_WRITE_HEADER \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_rpm_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
                          i915_reg_t reg, u##x val, bool trace) { \
        VGPU_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
        dev_priv->uncore.funcs.mmio_writew = x##_write16; \
        dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_readb = x##_read8; \
        dev_priv->uncore.funcs.mmio_readw = x##_read16; \
        dev_priv->uncore.funcs.mmio_readl = x##_read32; \
        dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
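
/*
 * Editor's note: a sketch of what one invocation expands to, e.g.
 * ASSIGN_READ_MMIO_VFUNCS(fwtable) in intel_uncore_init() becomes:
 *
 *      dev_priv->uncore.funcs.mmio_readb = fwtable_read8;
 *      dev_priv->uncore.funcs.mmio_readw = fwtable_read16;
 *      dev_priv->uncore.funcs.mmio_readl = fwtable_read32;
 *      dev_priv->uncore.funcs.mmio_readq = fwtable_read64;
 *
 * i.e. the token-pasted per-width functions generated by the macros above
 * are installed as the device's MMIO vtable.
 */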
static void fw_domain_init(struct drm_i915_private *dev_priv,
                           enum forcewake_domain_id domain_id,
                           i915_reg_t reg_set,
                           i915_reg_t reg_ack)
{
        struct intel_uncore_forcewake_domain *d;

        if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
                return;

        d = &dev_priv->uncore.fw_domain[domain_id];

        WARN_ON(d->wake_count);

        d->wake_count = 0;
        d->reg_set = reg_set;
        d->reg_ack = reg_ack;

        if (IS_GEN6(dev_priv)) {
                d->val_reset = 0;
                d->val_set = FORCEWAKE_KERNEL;
                d->val_clear = 0;
        } else {
                /* WaRsClearFWBitsAtReset:bdw,skl */
                d->val_reset = _MASKED_BIT_DISABLE(0xffff);
                d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
                d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                d->reg_post = FORCEWAKE_ACK_VLV;
        else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
                d->reg_post = ECOBUS;

        d->i915 = dev_priv;
        d->id = domain_id;

        BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
        BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
        BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

        d->mask = 1 << domain_id;

        hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        d->timer.function = intel_uncore_fw_release_timer;

        dev_priv->uncore.fw_domains |= (1 << domain_id);

        fw_domain_reset(d);
}
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
        if (INTEL_INFO(dev_priv)->gen <= 5)
                return;

        if (IS_GEN9(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_RENDER_GEN9,
                               FORCEWAKE_ACK_RENDER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
                               FORCEWAKE_BLITTER_GEN9,
                               FORCEWAKE_ACK_BLITTER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                if (!IS_CHERRYVIEW(dev_priv))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                if (IS_HASWELL(dev_priv))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(dev_priv)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here - if the bios hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero. Which will be
                 * (correctly) interpreted by the test below as MT
                 * forcewake being disabled.
                 */
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;

                /* We need to init first for ECOBUS access and then
                 * determine later if we want to reinit, in case MT access
                 * is not working. At this stage we don't know which flavour
                 * this ivb is, so it is better to also reset the gen6 fw
                 * registers before the ecobus check.
                 */
                __raw_i915_write32(dev_priv, FORCEWAKE, 0);
                __raw_posting_read(dev_priv, ECOBUS);

                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);

                spin_lock_irq(&dev_priv->uncore.lock);
                fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
                spin_unlock_irq(&dev_priv->uncore.lock);

                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                                       FORCEWAKE, FORCEWAKE_ACK);
                }
        } else if (IS_GEN6(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE, FORCEWAKE_ACK);
        }

        /* All future platforms are expected to require complex power gating */
        WARN_ON(dev_priv->uncore.fw_domains == 0);
}
#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
        dev_priv->uncore.fw_domains_table = \
                        (struct intel_forcewake_range *)(d); \
        dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
        i915_check_vgpu(dev_priv);

        intel_uncore_edram_detect(dev_priv);
        intel_uncore_fw_domains_init(dev_priv);
        __intel_uncore_early_sanitize(dev_priv, false);

        dev_priv->uncore.unclaimed_mmio_check = 1;

        switch (INTEL_INFO(dev_priv)->gen) {
        default:
        case 9:
                ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
                ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
                ASSIGN_READ_MMIO_VFUNCS(fwtable);
                if (HAS_DECOUPLED_MMIO(dev_priv)) {
                        dev_priv->uncore.funcs.mmio_readl =
                                                gen9_decoupled_read32;
                        dev_priv->uncore.funcs.mmio_readq =
                                                gen9_decoupled_read64;
                        dev_priv->uncore.funcs.mmio_writel =
                                                gen9_decoupled_write32;
                }
                break;
        case 8:
                if (IS_CHERRYVIEW(dev_priv)) {
                        ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
                        ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
                        ASSIGN_READ_MMIO_VFUNCS(fwtable);
                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(gen8);
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 7:
        case 6:
                ASSIGN_WRITE_MMIO_VFUNCS(gen6);

                if (IS_VALLEYVIEW(dev_priv)) {
                        ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
                        ASSIGN_READ_MMIO_VFUNCS(fwtable);
                } else {
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 5:
                ASSIGN_WRITE_MMIO_VFUNCS(gen5);
                ASSIGN_READ_MMIO_VFUNCS(gen5);
                break;
        case 4:
        case 3:
        case 2:
                ASSIGN_WRITE_MMIO_VFUNCS(gen2);
                ASSIGN_READ_MMIO_VFUNCS(gen2);
                break;
        }

        intel_fw_table_check(dev_priv);
        if (INTEL_GEN(dev_priv) >= 8)
                intel_shadow_table_check();

        if (intel_vgpu_active(dev_priv)) {
                ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
                ASSIGN_READ_MMIO_VFUNCS(vgpu);
        }

        i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev_priv);
        intel_uncore_forcewake_reset(dev_priv, false);
}

#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

static const struct register_whitelist {
        i915_reg_t offset_ldw, offset_udw;
        uint32_t size;
        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
        uint32_t gen_bitmask;
} whitelist[] = {
        { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
          .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
          .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        unsigned size;
        i915_reg_t offset_ldw, offset_udw;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
                    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;
        /* We use the low bits to encode extra flags as the register should
         * be naturally aligned (and those that are not so aligned merely
         * limit the available flags for that register).
         */
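        /* Editor's note, a worked example of the encoding below: the only
         * whitelist entry is RING_TIMESTAMP (0x2358, size 8). A userspace
         * offset of 0x2358 leaves size == 8 and takes the plain 64-bit
         * read, while 0x2359 yields size == (8 | 1) and selects the
         * I915_READ64_2x32() path instead.
         */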
  1255. offset_ldw = entry->offset_ldw;
  1256. offset_udw = entry->offset_udw;
  1257. size = entry->size;
  1258. size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
  1259. intel_runtime_pm_get(dev_priv);
  1260. switch (size) {
  1261. case 8 | 1:
  1262. reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
  1263. break;
  1264. case 8:
  1265. reg->val = I915_READ64(offset_ldw);
  1266. break;
  1267. case 4:
  1268. reg->val = I915_READ(offset_ldw);
  1269. break;
  1270. case 2:
  1271. reg->val = I915_READ16(offset_ldw);
  1272. break;
  1273. case 1:
  1274. reg->val = I915_READ8(offset_ldw);
  1275. break;
  1276. default:
  1277. ret = -EINVAL;
  1278. goto out;
  1279. }
  1280. out:
  1281. intel_runtime_pm_put(dev_priv);
  1282. return ret;
  1283. }
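
/*
 * Worked example of the size|flags encoding above (a sketch, not driver
 * code): the whitelisted RING_TIMESTAMP entry has entry->size == 8, so a
 * userspace offset equal to the register base yields size == 8 and a plain
 * 64-bit read, while an offset of (base | 1) yields size == (8 | 1) and
 * selects I915_READ64_2x32(), which re-reads the upper half until it is
 * stable instead of relying on an atomic 64-bit mmio read.
 */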
static int i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}
static int g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}
static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
}
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
}
  1369. /**
  1370. * gen6_reset_engines - reset individual engines
  1371. * @dev_priv: i915 device
  1372. * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
  1373. *
  1374. * This function will reset the individual engines that are set in engine_mask.
  1375. * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
  1376. *
  1377. * Note: It is responsibility of the caller to handle the difference between
  1378. * asking full domain reset versus reset for all available individual engines.
  1379. *
  1380. * Returns 0 on success, nonzero on error.
  1381. */
  1382. static int gen6_reset_engines(struct drm_i915_private *dev_priv,
  1383. unsigned engine_mask)
  1384. {
  1385. struct intel_engine_cs *engine;
  1386. const u32 hw_engine_mask[I915_NUM_ENGINES] = {
  1387. [RCS] = GEN6_GRDOM_RENDER,
  1388. [BCS] = GEN6_GRDOM_BLT,
  1389. [VCS] = GEN6_GRDOM_MEDIA,
  1390. [VCS2] = GEN8_GRDOM_MEDIA2,
  1391. [VECS] = GEN6_GRDOM_VECS,
  1392. };
  1393. u32 hw_mask;
  1394. int ret;
  1395. if (engine_mask == ALL_ENGINES) {
  1396. hw_mask = GEN6_GRDOM_FULL;
  1397. } else {
  1398. unsigned int tmp;
  1399. hw_mask = 0;
  1400. for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
  1401. hw_mask |= hw_engine_mask[engine->id];
  1402. }
  1403. ret = gen6_hw_domain_reset(dev_priv, hw_mask);
  1404. intel_uncore_forcewake_reset(dev_priv, true);
  1405. return ret;
  1406. }
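
/*
 * Usage sketch (hypothetical caller, not code from this file), assuming
 * intel_engine_flag() is the per-engine bit helper and dev_priv->engine[]
 * holds the engine pointers: resetting only render and blitter would be
 *
 *	ret = gen6_reset_engines(dev_priv,
 *				 intel_engine_flag(dev_priv->engine[RCS]) |
 *				 intel_engine_flag(dev_priv->engine[BCS]));
 *
 * whereas ALL_ENGINES skips the per-engine lookup entirely and issues a
 * single GEN6_GRDOM_FULL domain reset.
 */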
  1407. /**
  1408. * intel_wait_for_register_fw - wait until register matches expected state
  1409. * @dev_priv: the i915 device
  1410. * @reg: the register to read
  1411. * @mask: mask to apply to register value
  1412. * @value: expected value
  1413. * @timeout_ms: timeout in millisecond
  1414. *
  1415. * This routine waits until the target register @reg contains the expected
  1416. * @value after applying the @mask, i.e. it waits until ::
  1417. *
  1418. * (I915_READ_FW(reg) & mask) == value
  1419. *
  1420. * Otherwise, the wait will timeout after @timeout_ms milliseconds.
  1421. *
  1422. * Note that this routine assumes the caller holds forcewake asserted, it is
  1423. * not suitable for very long waits. See intel_wait_for_register() if you
  1424. * wish to wait without holding forcewake for the duration (i.e. you expect
  1425. * the wait to be slow).
  1426. *
  1427. * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
  1428. */
  1429. int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
  1430. i915_reg_t reg,
  1431. const u32 mask,
  1432. const u32 value,
  1433. const unsigned long timeout_ms)
  1434. {
  1435. #define done ((I915_READ_FW(reg) & mask) == value)
  1436. int ret = wait_for_us(done, 2);
  1437. if (ret)
  1438. ret = wait_for(done, timeout_ms);
  1439. return ret;
  1440. #undef done
  1441. }
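
/*
 * Usage sketch (mirroring gen6_hw_domain_reset() above): with forcewake
 * already held, spin for the device to ack a reset request:
 *
 *	ret = intel_wait_for_register_fw(dev_priv,
 *					 GEN6_GDRST, hw_domain_mask, 0,
 *					 500);
 *
 * The two-stage wait keeps the common case cheap: a 2us busy-spin catches
 * registers that settle almost immediately, and only then do we fall back
 * to the sleeping wait_for() for the remainder of @timeout_ms.
 */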
  1442. /**
  1443. * intel_wait_for_register - wait until register matches expected state
  1444. * @dev_priv: the i915 device
  1445. * @reg: the register to read
  1446. * @mask: mask to apply to register value
  1447. * @value: expected value
  1448. * @timeout_ms: timeout in millisecond
  1449. *
  1450. * This routine waits until the target register @reg contains the expected
  1451. * @value after applying the @mask, i.e. it waits until ::
  1452. *
  1453. * (I915_READ(reg) & mask) == value
  1454. *
  1455. * Otherwise, the wait will timeout after @timeout_ms milliseconds.
  1456. *
  1457. * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
  1458. */
  1459. int intel_wait_for_register(struct drm_i915_private *dev_priv,
  1460. i915_reg_t reg,
  1461. const u32 mask,
  1462. const u32 value,
  1463. const unsigned long timeout_ms)
  1464. {
  1465. unsigned fw =
  1466. intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
  1467. int ret;
  1468. intel_uncore_forcewake_get(dev_priv, fw);
  1469. ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
  1470. intel_uncore_forcewake_put(dev_priv, fw);
  1471. if (ret)
  1472. ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
  1473. timeout_ms);
  1474. return ret;
  1475. }
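
/*
 * Design note: unlike the _fw variant above, this helper takes the required
 * forcewake only around the initial 2us spin and deliberately drops it
 * before the sleeping wait, so a genuinely slow wait does not pin the GT
 * awake; the NOTRACE read in the slow path then pays the forcewake cost on
 * each access instead.
 */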
static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
}
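
/*
 * Design note: gen8 layers a ready-for-reset handshake on top of the gen6
 * domain reset. Every engine in the mask must first report
 * RESET_CTL_READY_TO_RESET; if any engine never becomes ready, the requests
 * are withdrawn again so no engine is left with a stale pending-reset flag.
 */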
typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}
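
/*
 * Usage sketch (hypothetical caller): pair the capability check with the
 * reset itself, e.g. for a full-GPU reset attempt:
 *
 *	if (intel_has_gpu_reset(dev_priv))
 *		ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
 */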
int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}
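
/*
 * Design note: arming is oneshot. Bumping i915.mmio_debug turns on the
 * per-access unclaimed-register checks for subsequent mmio, while
 * unclaimed_mmio_check is decremented so that repeated faults do not
 * re-arm (and re-warn) forever once the user has been pointed at
 * i915.mmio_debug for deeper debugging.
 */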
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}
  1619. /**
  1620. * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
  1621. * a register
  1622. * @dev_priv: pointer to struct drm_i915_private
  1623. * @reg: register in question
  1624. * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
  1625. *
  1626. * Returns a set of forcewake domains required to be taken with for example
  1627. * intel_uncore_forcewake_get for the specified register to be accessible in the
  1628. * specified mode (read, write or read/write) with raw mmio accessors.
  1629. *
  1630. * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
  1631. * callers to do FIFO management on their own or risk losing writes.
  1632. */
  1633. enum forcewake_domains
  1634. intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
  1635. i915_reg_t reg, unsigned int op)
  1636. {
  1637. enum forcewake_domains fw_domains = 0;
  1638. WARN_ON(!op);
  1639. if (intel_vgpu_active(dev_priv))
  1640. return 0;
  1641. if (op & FW_REG_READ)
  1642. fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
  1643. if (op & FW_REG_WRITE)
  1644. fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
  1645. return fw_domains;
  1646. }
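
/*
 * Usage sketch (following the pattern of intel_wait_for_register() above):
 * take only the domains a register actually needs, do the raw accesses,
 * then release them:
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(dev_priv, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	I915_WRITE_FW(reg, I915_READ_FW(reg) | bit);
 *	intel_uncore_forcewake_put(dev_priv, fw);
 *
 * where @reg and @bit stand in for a real register and flag.
 */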