intel_uncore.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

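/*
 * Forcewake keeps the GT power wells awake so that registers behind them
 * can be safely accessed over MMIO; each named domain below maps onto one
 * such well.
 */
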
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

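/*
 * Releasing a forcewake reference is deferred by ~1ms via an hrtimer, so
 * that back-to-back register accesses reuse the wakeup instead of bouncing
 * the domain off and on again.
 */
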
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       ktime_set(0, NSEC_PER_MSEC),
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;

	/* No need to do this for all domains, just the first one found */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

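/*
 * Gen6/7 route register writes through a shared hardware FIFO with a limited
 * number of entries; overflowing it is an error, so writers wait below for
 * free slots while keeping a few entries in reserve.
 */
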
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
				  bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv)
		if (domain->wake_count)
			fw |= domain->mask;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

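/* On gen9 the eDRAM size is banks * ways * sets * 1MB, decoded from EDRAM_CAP. */
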
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The capability bits needed for the size calculation are
	 * not there pre-gen9, so always return 128MB.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

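/*
 * An "unclaimed" access is an MMIO transaction that no hardware unit claimed,
 * usually a symptom of touching a register while its power well is down.
 * The helpers below read and clear the per-platform sticky error bits.
 */
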
static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
				 bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
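 *
 * An illustrative pattern brackets a run of raw accesses:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);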
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define __vlv_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (!NEEDS_FORCE_WAKE(offset)) \
		__fwd = 0; \
	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_MEDIA; \
	__fwd; \
})

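/*
 * Shadowed registers are tracked by the hardware even while the GT is in
 * RC6, so writes to them do not need a forcewake reference.
 */
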
static const i915_reg_t gen8_shadowed_regs[] = {
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(u32 offset)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (offset == gen8_shadowed_regs[i].reg)
			return true;

	return false;
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define __chv_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (!NEEDS_FORCE_WAKE(offset)) \
		__fwd = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	__fwd; \
})

#define __chv_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
		__fwd = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	__fwd; \
})

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

#define SKL_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
		__fwd = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		__fwd = FORCEWAKE_BLITTER; \
	__fwd; \
})

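/* On gen9, any register not claimed by an explicit range above sits behind
 * the blitter power well, hence the FORCEWAKE_BLITTER fallback. */
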
static const i915_reg_t gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(u32 offset)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (offset == gen9_shadowed_regs[i].reg)
			return true;

	return false;
}

#define __gen9_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
		__fwd = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		__fwd = FORCEWAKE_BLITTER; \
	__fwd; \
})

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv),
		 "Unclaimed register detected %s %s register 0x%x\n",
		 before ? "before" : "after",
		 read ? "reading" : "writing to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

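/*
 * The per-generation MMIO accessors below are stamped out by macros; the
 * HEADER/FOOTER pairs factor out the locking, tracing and unclaimed-access
 * checks shared by every variant.
 */
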
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read
#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count) {
			fw_domains &= ~domain->mask;
			continue;
		}

		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen6_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __vlv_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __chv_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen9_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __gen8_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __chv_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
	      bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __gen9_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

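/*
 * Each forcewake domain carries its own set/ack registers, reference count
 * and release timer; fw_domain_init() wires one up and leaves it powered
 * down via fw_domain_reset().
 */
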
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = 1 << domain_id;

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen <= 5)
		return;

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access
		 * is not working. At this stage we don't know which flavour
		 * this ivb is, so it is better to reset also the gen6 fw
		 * registers before the ecobus check.
		 */
		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev_priv)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev_priv)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}

#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

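/*
 * GPU reset is driven through the GDRST register: PCI config space on the
 * oldest parts, MMIO (ILK_GDSR, GEN6_GDRST) on ILK and later. Each variant
 * below asserts the reset domain bits and polls for completion.
 */
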
  1226. static int i915_reset_complete(struct pci_dev *pdev)
  1227. {
  1228. u8 gdrst;
  1229. pci_read_config_byte(pdev, I915_GDRST, &gdrst);
  1230. return (gdrst & GRDOM_RESET_STATUS) == 0;
  1231. }
  1232. static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
  1233. {
  1234. struct pci_dev *pdev = dev_priv->dev->pdev;
  1235. /* assert reset for at least 20 usec */
  1236. pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
  1237. udelay(20);
  1238. pci_write_config_byte(pdev, I915_GDRST, 0);
  1239. return wait_for(i915_reset_complete(pdev), 500);
  1240. }
  1241. static int g4x_reset_complete(struct pci_dev *pdev)
  1242. {
  1243. u8 gdrst;
  1244. pci_read_config_byte(pdev, I915_GDRST, &gdrst);
  1245. return (gdrst & GRDOM_RESET_ENABLE) == 0;
  1246. }
  1247. static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
  1248. {
  1249. struct pci_dev *pdev = dev_priv->dev->pdev;
  1250. pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
  1251. return wait_for(g4x_reset_complete(pdev), 500);
  1252. }
  1253. static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
  1254. {
  1255. struct pci_dev *pdev = dev_priv->dev->pdev;
  1256. int ret;
  1257. pci_write_config_byte(pdev, I915_GDRST,
  1258. GRDOM_RENDER | GRDOM_RESET_ENABLE);
  1259. ret = wait_for(g4x_reset_complete(pdev), 500);
  1260. if (ret)
  1261. return ret;
  1262. /* WaVcpClkGateDisableForMediaReset:ctg,elk */
  1263. I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
  1264. POSTING_READ(VDECCLK_GATE_D);
  1265. pci_write_config_byte(pdev, I915_GDRST,
  1266. GRDOM_MEDIA | GRDOM_RESET_ENABLE);
  1267. ret = wait_for(g4x_reset_complete(pdev), 500);
  1268. if (ret)
  1269. return ret;
  1270. /* WaVcpClkGateDisableForMediaReset:ctg,elk */
  1271. I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
  1272. POSTING_READ(VDECCLK_GATE_D);
  1273. pci_write_config_byte(pdev, I915_GDRST, 0);
  1274. return 0;
  1275. }
  1276. static int ironlake_do_reset(struct drm_i915_private *dev_priv,
  1277. unsigned engine_mask)
  1278. {
  1279. int ret;
  1280. I915_WRITE(ILK_GDSR,
  1281. ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
  1282. ret = wait_for((I915_READ(ILK_GDSR) &
  1283. ILK_GRDOM_RESET_ENABLE) == 0, 500);
  1284. if (ret)
  1285. return ret;
  1286. I915_WRITE(ILK_GDSR,
  1287. ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
  1288. ret = wait_for((I915_READ(ILK_GDSR) &
  1289. ILK_GRDOM_RESET_ENABLE) == 0, 500);
  1290. if (ret)
  1291. return ret;
  1292. I915_WRITE(ILK_GDSR, 0);
  1293. return 0;
  1294. }
  1295. /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
  1296. static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
  1297. u32 hw_domain_mask)
  1298. {
  1299. int ret;
  1300. /* GEN6_GDRST is not in the gt power well, no need to check
  1301. * for fifo space for the write or forcewake the chip for
  1302. * the read
  1303. */
  1304. __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
  1305. #define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
  1306. /* Spin waiting for the device to ack the reset requests */
  1307. ret = wait_for(ACKED, 500);
  1308. #undef ACKED
  1309. return ret;
  1310. }
/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for a full reset
 *
 * This function resets the individual engines that are set in engine_mask.
 * If ALL_ENGINES is passed as the mask, a full global domain reset is issued
 * instead.
 *
 * Note: It is the responsibility of the caller to handle the difference
 * between asking for a full domain reset versus a reset of all available
 * individual engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}
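
/*
 * Illustrative sketch, not part of the original file: engine_mask is a
 * bitmask keyed by engine id (the same encoding for_each_engine_masked()
 * consumes), so resetting just the render and blitter engines translates
 * into GEN6_GRDOM_RENDER | GEN6_GRDOM_BLT above, e.g.:
 *
 *	ret = gen6_reset_engines(dev_priv, (1 << RCS) | (1 << BCS));
 */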
static int wait_for_register_fw(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const u32 mask,
				const u32 value,
				const unsigned long timeout_ms)
{
	return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
}

static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* Ask the engine to quiesce, then wait for it to signal that it
	 * is safe to reset.
	 */
	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = wait_for_register_fw(dev_priv,
				   RING_RESET_CTL(engine->mmio_base),
				   RESET_CTL_READY_TO_RESET,
				   RESET_CTL_READY_TO_RESET,
				   700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;

	/* Gen8+ layers a handshake on top of the gen6 domain reset: every
	 * targeted engine must first acknowledge that it is ready to be
	 * reset. If any engine fails to do so, withdraw the requests from
	 * all of them and bail out.
	 */
	for_each_engine_masked(engine, dev_priv, engine_mask)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
}
typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO),
	 * so hold forcewake across the whole operation.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
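
/*
 * Illustrative caller sketch, not part of the original file: a hang
 * handler would typically check for reset support first and then attempt
 * a full reset, e.g.:
 *
 *	if (intel_has_gpu_reset(dev_priv))
 *		ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
 */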
bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	enum forcewake_domains fw_domains;

	if (intel_vgpu_active(dev_priv))
		return 0;

	switch (INTEL_GEN(dev_priv)) {
	case 9:
		fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv))
			fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
		else
			fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
		break;
	case 7:
	case 6:
		if (IS_VALLEYVIEW(dev_priv))
			fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
		else
			fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
		break;
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gen);
	case 5: /* forcewake was introduced with gen6 */
	case 4:
	case 3:
	case 2:
		return 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	enum forcewake_domains fw_domains;

	if (intel_vgpu_active(dev_priv))
		return 0;

	switch (INTEL_GEN(dev_priv)) {
	case 9:
		fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv))
			fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
		else
			fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
		break;
	case 7:
	case 6:
		fw_domains = FORCEWAKE_RENDER;
		break;
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gen);
	case 5:
	case 4:
	case 3:
	case 2:
		return 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}
/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains that must be taken (for example with
 * intel_uncore_forcewake_get) for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}
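
/*
 * Illustrative usage sketch, not part of the original file: a caller that
 * wants to perform raw accesses under a single forcewake reference would
 * pair this lookup with the __locked forcewake helpers, assuming the
 * uncore.lock protocol used elsewhere in this file:
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, fw);
 *	val = I915_READ_FW(reg);
 *	intel_uncore_forcewake_put__locked(dev_priv, fw);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */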