intel_uncore.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(struct drm_i915_private *i915,
		const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}
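
/*
 * Note on the release path (editor's sketch, not authoritative): rather than
 * dropping forcewake immediately on the last put, each domain arms the
 * hrtimer below for roughly 1ms. If another register access arrives inside
 * that window the domain is simply marked active again, so back-to-back
 * accesses avoid paying a full wake/ack round trip per access.
 */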
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct drm_i915_private *i915,
	       const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct drm_i915_private *i915,
	       const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(i915, d, ack, 0);
}

static inline int
wait_ack_set(const struct drm_i915_private *i915,
	     const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(i915, d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
			 const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};
static int
fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
				 const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524
	 */

	pass = 1;
	do {
		wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);

		__raw_i915_write32(i915, d->reg_set,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;

		__raw_i915_write32(i915, d->reg_set,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 __raw_i915_read32(i915, d->reg_ack),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}
static inline void
fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
				  const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
		fw_domain_wait_ack_clear(i915, d);
}

static inline void
fw_domain_get(struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}

static inline void
fw_domain_wait_ack_set(const struct drm_i915_private *i915,
		       const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
				const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
		fw_domain_wait_ack_set(i915, d);
}

static inline void
fw_domain_put(const struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}

static void
fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack_set(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct drm_i915_private *i915,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear_fallback(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack_set_fallback(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_put(i915, d);

	i915->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct drm_i915_private *i915,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_reset(i915, d);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}
static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	u32 n;

	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		n = fifo_free_entries(dev_priv);
	else
		n = dev_priv->uncore.fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	dev_priv->uncore.fifo_count = n - 1;
}
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv =
		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
					 bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}
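
/*
 * Worked example (editor's note, hypothetical capability value): if the
 * EDRAM capability register decoded to 8 banks, a ways index of 1 (i.e.
 * 8 ways) and a sets index of 2 (i.e. 2 sets), the expression above would
 * evaluate to 8 * 8 * 2 * 1 MiB = 128 MiB.
 */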
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre-gen9, so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
{
	u32 fifodbg;

	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}
static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	bool ret = false;

	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		ret |= fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret |= vlv_check_for_unclaimed_mmio(dev_priv);

	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		ret |= gen6_check_for_fifo_debug(dev_priv);

	return ret;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
	__intel_uncore_early_sanitize(dev_priv, true);
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
{
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
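
/*
 * Usage sketch (editor's illustration, not code from this file): a caller
 * issuing a burst of raw register accesses would typically bracket them as
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... several I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 *
 * so the wake/ack handshake is paid once instead of once per access.
 */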
/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!dev_priv->uncore.user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		dev_priv->uncore.user_forcewake.saved_mmio_check =
			dev_priv->uncore.unclaimed_mmio_check;
		dev_priv->uncore.user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		dev_priv->uncore.unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!--dev_priv->uncore.user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(dev_priv))
			dev_info(dev_priv->drm.dev,
				 "Invalid mmio detected during user access\n");

		dev_priv->uncore.unclaimed_mmio_check =
			dev_priv->uncore.user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			dev_priv->uncore.user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN(dev_priv->uncore.fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     dev_priv->uncore.fw_domains_active);
}

void assert_forcewakes_active(struct drm_i915_private *dev_priv,
			      enum forcewake_domains fw_domains)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	fw_domains &= dev_priv->uncore.fw_domains;
	WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
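
/*
 * Example (editor's note): on gen6 the render forcewake domain covers the
 * whole low MMIO window, so an offset such as 0x2030 (RING_TAIL of the
 * render ring, below 0x40000) resolves to FORCEWAKE_RENDER, while an offset
 * at or above 0x40000 (the display range) resolves to 0 and can be accessed
 * without any forcewake.
 */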
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})
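
/*
 * Walkthrough (editor's note): BSEARCH() is an ordinary binary search over a
 * sorted array that yields a pointer to the matching element, or NULL. For
 * example, looking up offset 0x9400 in the gen9 table further below halves
 * the candidate range on each comparison until it lands on the
 * 0x9400-0x97ff entry, whose domains field is
 * FORCEWAKE_RENDER | FORCEWAKE_MEDIA.
 */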
static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}
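
/*
 * Editor's note: shadowed registers have a copy maintained by the hardware
 * that remains valid while the GT is powered down, so writes to them can
 * skip the forcewake handshake entirely. That is why the write-side domain
 * lookups below test is_gen8_shadowed() before requesting any domain.
 */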
#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915_modparams.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read
#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
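
/*
 * Editor's sketch of what the generators above expand to: __gen2_read(32),
 * for instance, produces roughly
 *
 *	static u32
 *	gen2_read32(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace)
 *	{
 *		u32 val = 0;
 *		assert_rpm_wakelock_held(dev_priv);
 *		val = __raw_i915_read32(dev_priv, reg);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 *
 * with the gen5 variant adding an ilk_dummy_write() before the raw read.
 */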
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)

__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)

__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
	(i915)->uncore.funcs.mmio_writew = x##_write16; \
	(i915)->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_readb = x##_read8; \
	(i915)->uncore.funcs.mmio_readw = x##_read16; \
	(i915)->uncore.funcs.mmio_readl = x##_read32; \
	(i915)->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
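
/*
 * Editor's note: ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable), for example,
 * expands to the four assignments wiring uncore.funcs.mmio_readb/w/l/q to
 * fwtable_read8/16/32/64. In other words, the accessors generated above are
 * selected once per platform at init time rather than branching on every
 * register access.
 */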
  1007. static void fw_domain_init(struct drm_i915_private *dev_priv,
  1008. enum forcewake_domain_id domain_id,
  1009. i915_reg_t reg_set,
  1010. i915_reg_t reg_ack)
  1011. {
  1012. struct intel_uncore_forcewake_domain *d;
  1013. if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
  1014. return;
  1015. d = &dev_priv->uncore.fw_domain[domain_id];
  1016. WARN_ON(d->wake_count);
  1017. WARN_ON(!i915_mmio_reg_valid(reg_set));
  1018. WARN_ON(!i915_mmio_reg_valid(reg_ack));
  1019. d->wake_count = 0;
  1020. d->reg_set = reg_set;
  1021. d->reg_ack = reg_ack;
  1022. d->id = domain_id;
  1023. BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
  1024. BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
  1025. BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
  1026. d->mask = BIT(domain_id);
  1027. hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  1028. d->timer.function = intel_uncore_fw_release_timer;
  1029. dev_priv->uncore.fw_domains |= BIT(domain_id);
  1030. fw_domain_reset(dev_priv, d);
  1031. }
  1032. static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
  1033. {
  1034. if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
  1035. return;
  1036. if (IS_GEN6(dev_priv)) {
  1037. dev_priv->uncore.fw_reset = 0;
  1038. dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
  1039. dev_priv->uncore.fw_clear = 0;
  1040. } else {
  1041. /* WaRsClearFWBitsAtReset:bdw,skl */
  1042. dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
  1043. dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
  1044. dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
  1045. }
  1046. if (INTEL_GEN(dev_priv) >= 9) {
  1047. dev_priv->uncore.funcs.force_wake_get =
  1048. fw_domains_get_with_fallback;
  1049. dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
  1050. fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1051. FORCEWAKE_RENDER_GEN9,
  1052. FORCEWAKE_ACK_RENDER_GEN9);
  1053. fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
  1054. FORCEWAKE_BLITTER_GEN9,
  1055. FORCEWAKE_ACK_BLITTER_GEN9);
  1056. fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
  1057. FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
  1058. } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
  1059. dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
  1060. dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
  1061. fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1062. FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
  1063. fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
  1064. FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
  1065. } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
  1066. dev_priv->uncore.funcs.force_wake_get =
  1067. fw_domains_get_with_thread_status;
  1068. dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
  1069. fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1070. FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
  1071. } else if (IS_IVYBRIDGE(dev_priv)) {
  1072. u32 ecobus;
  1073. /* IVB configs may use multi-threaded forcewake */
  1074. /* A small trick here - if the bios hasn't configured
  1075. * MT forcewake, and if the device is in RC6, then
  1076. * force_wake_mt_get will not wake the device and the
  1077. * ECOBUS read will return zero. Which will be
  1078. * (correctly) interpreted by the test below as MT
  1079. * forcewake being disabled.
  1080. */
  1081. dev_priv->uncore.funcs.force_wake_get =
  1082. fw_domains_get_with_thread_status;
  1083. dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
  1084. /* We need to init first for ECOBUS access and then
  1085. * determine later if we want to reinit, in case of MT access is
  1086. * not working. In this stage we don't know which flavour this
  1087. * ivb is, so it is better to reset also the gen6 fw registers
  1088. * before the ecobus check.
  1089. */
  1090. __raw_i915_write32(dev_priv, FORCEWAKE, 0);
  1091. __raw_posting_read(dev_priv, ECOBUS);
  1092. fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1093. FORCEWAKE_MT, FORCEWAKE_MT_ACK);
  1094. spin_lock_irq(&dev_priv->uncore.lock);
  1095. fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
  1096. ecobus = __raw_i915_read32(dev_priv, ECOBUS);
  1097. fw_domains_put(dev_priv, FORCEWAKE_RENDER);
  1098. spin_unlock_irq(&dev_priv->uncore.lock);
  1099. if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
  1100. DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
  1101. DRM_INFO("when using vblank-synced partial screen updates.\n");
  1102. fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1103. FORCEWAKE, FORCEWAKE_ACK);
  1104. }
  1105. } else if (IS_GEN6(dev_priv)) {
  1106. dev_priv->uncore.funcs.force_wake_get =
  1107. fw_domains_get_with_thread_status;
  1108. dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
  1109. fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
  1110. FORCEWAKE, FORCEWAKE_ACK);
  1111. }
  1112. /* All future platforms are expected to require complex power gating */
  1113. WARN_ON(dev_priv->uncore.fw_domains == 0);
  1114. }
  1115. #define ASSIGN_FW_DOMAINS_TABLE(d) \
  1116. { \
  1117. dev_priv->uncore.fw_domains_table = \
  1118. (struct intel_forcewake_range *)(d); \
  1119. dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
  1120. }
  1121. static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
  1122. unsigned long action, void *data)
  1123. {
  1124. struct drm_i915_private *dev_priv = container_of(nb,
  1125. struct drm_i915_private, uncore.pmic_bus_access_nb);
  1126. switch (action) {
  1127. case MBI_PMIC_BUS_ACCESS_BEGIN:
  1128. /*
  1129. * forcewake all now to make sure that we don't need to do a
  1130. * forcewake later which on systems where this notifier gets
  1131. * called requires the punit to access to the shared pmic i2c
  1132. * bus, which will be busy after this notification, leading to:
  1133. * "render: timed out waiting for forcewake ack request."
  1134. * errors.
  1135. *
  1136. * The notifier is unregistered during intel_runtime_suspend(),
  1137. * so it's ok to access the HW here without holding a RPM
  1138. * wake reference -> disable wakeref asserts for the time of
	 * the access.
	 */
		disable_rpm_wakeref_asserts(dev_priv);
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(dev_priv);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;
	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
	} else if (IS_GEN5(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else if (IS_GEN8(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else {
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
	}

	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
}

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
	iosf_mbi_punit_release();
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 10),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	intel_runtime_pm_get(dev_priv);
	if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
		reg->val = I915_READ64_2x32(entry->offset_ldw,
					    entry->offset_udw);
	else if (entry->size == 8 && flags == 0)
		reg->val = I915_READ64(entry->offset_ldw);
	else if (entry->size == 4 && flags == 0)
		reg->val = I915_READ(entry->offset_ldw);
	else if (entry->size == 2 && flags == 0)
		reg->val = I915_READ16(entry->offset_ldw);
	else if (entry->size == 1 && flags == 0)
		reg->val = I915_READ8(entry->offset_ldw);
	else
		ret = -EINVAL;
	intel_runtime_pm_put(dev_priv);

	return ret;
}
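
/*
 * Usage sketch (illustrative only, not part of the driver): how userspace
 * might exercise this ioctl to read the whitelisted render ring timestamp.
 * Assumes an open DRM device fd; struct drm_i915_reg_read and
 * DRM_IOCTL_I915_REG_READ come from the uapi header <drm/i915_drm.h>.
 * The raw offset 0x2358 is RING_TIMESTAMP(RENDER_RING_BASE); its low bits
 * are zero, so flags == 0 and the kernel performs a full 64-bit read.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_read_render_timestamp(int fd, unsigned long long *ts)
{
	struct drm_i915_reg_read reg = {
		.offset = 0x2358, /* RING_TIMESTAMP(RENDER_RING_BASE) */
	};
	int err;

	err = ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg);
	if (err)
		return err;

	*ts = reg.val;
	return 0;
}
#endif
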
static void gen3_stop_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);

	I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
	if (intel_wait_for_register_fw(dev_priv,
				       mode,
				       MODE_IDLE,
				       MODE_IDLE,
				       500))
		DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
				 engine->name);

	I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));

	I915_WRITE_FW(RING_HEAD(base), 0);
	I915_WRITE_FW(RING_TAIL(base), 0);

	/* The ring must be empty before it is disabled */
	I915_WRITE_FW(RING_CTL(base), 0);

	/* Check acts as a post */
	if (I915_READ_FW(RING_HEAD(base)) != 0)
		DRM_DEBUG_DRIVER("%s: ring head not parked\n",
				 engine->name);
}

static void i915_stop_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_GEN(dev_priv) < 3)
		return;

	for_each_engine_masked(engine, dev_priv, engine_mask, id)
		gen3_stop_engine(engine);
}

static bool i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	usleep_range(50, 200);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}

static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D,
		   I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	I915_WRITE(VDECCLK_GATE_D,
		   I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	return ret;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	I915_WRITE(ILK_GDSR, 0);
	POSTING_READ(ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	int err;

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = intel_wait_for_register_fw(dev_priv,
					 GEN6_GDRST, hw_domain_mask, 0,
					 500);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}

/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, a full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset of all available individual
 * engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	return gen6_hw_domain_reset(dev_priv, hw_mask);
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
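
/*
 * Usage sketch (illustrative only): an atomic-safe wait while the caller
 * already holds the relevant forcewake, mirroring gen6_hw_domain_reset()
 * above. The wrapper name is a placeholder; with slow_timeout_ms == 0
 * this variant never sleeps, so it is usable from atomic context.
 */
#if 0
static int example_wait_gdrst_clear(struct drm_i915_private *dev_priv,
				    u32 hw_domain_mask)
{
	/* Spin for up to 500us waiting for the domain bits to clear */
	return __intel_wait_for_register_fw(dev_priv,
					    GEN6_GDRST, hw_domain_mask, 0,
					    500, 0, NULL);
}
#endif
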
/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    u32 mask,
			    u32 value,
			    unsigned int timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	might_sleep();

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);

	ret = __intel_wait_for_register_fw(dev_priv,
					   reg, mask, value,
					   2, 0, NULL);

	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}
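
/*
 * Usage sketch (illustrative only): a potentially slow wait where the
 * caller does not hold forcewake for the duration. Forcewake is only
 * taken internally for a short 2us spin; the remainder of the wait
 * sleeps. The register and bit mirror the ironlake_do_reset() usage
 * above; the wrapper name is a placeholder.
 */
#if 0
static int example_wait_ilk_reset_idle(struct drm_i915_private *dev_priv)
{
	/* Sleep-wait up to 500ms for the reset-enable bit to clear */
	return intel_wait_for_register(dev_priv,
				       ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				       500);
}
#endif
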
static int gen8_reset_engine_start(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_reset_engine_start(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_reset_engine_cancel(engine);

	return -EIO;
}

typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915_modparams.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset = intel_get_gpu_reset(dev_priv);
	int retry;
	int ret;

	might_sleep();

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	for (retry = 0; retry < 3; retry++) {
		/* We stop the engines, otherwise we might get a failed reset
		 * and a dead gpu (on elk). Also, a gpu as modern as kbl can
		 * suffer a system hang if a batchbuffer is progressing when
		 * the reset is issued, regardless of the READY_TO_RESET ack.
		 * Thus we assume it is best to stop the engines on all gens
		 * where we have a gpu reset.
		 *
		 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
		 *
		 * FIXME: Wa for more modern gens needs to be validated
		 */
		i915_stop_engines(dev_priv, engine_mask);

		ret = -ENODEV;
		if (reset)
			ret = reset(dev_priv, engine_mask);
		if (ret != -ETIMEDOUT)
			break;

		cond_resched();
	}
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
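
/*
 * Usage sketch (illustrative only): a guarded full-device reset using
 * intel_has_gpu_reset() (defined just below). The calling context is
 * hypothetical; real callers also serialize against the rest of the
 * driver before resetting.
 */
#if 0
static int example_full_reset(struct drm_i915_private *dev_priv)
{
	if (!intel_has_gpu_reset(dev_priv))
		return -ENODEV;

	/* Resets every engine; retried internally on -ETIMEDOUT */
	return intel_gpu_reset(dev_priv, ALL_ENGINES);
}
#endif
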
bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
	return (dev_priv->info.has_reset_engine &&
		i915_modparams.reset >= 2);
}

int intel_reset_guc(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915_modparams.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915_modparams.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}

static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns the set of forcewake domains that must be taken with, for example,
 * intel_uncore_forcewake_get() for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}
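
/*
 * Usage sketch (illustrative only): take just the forcewake domains a
 * register needs before using the raw I915_READ_FW()/I915_WRITE_FW()
 * accessors, mirroring the pattern used by intel_wait_for_register()
 * above. The function name is a placeholder and @reg is any register
 * the caller may legitimately access.
 */
#if 0
static u32 example_raw_read(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	u32 val;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);
	val = I915_READ_FW(reg);
	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return val;
}
#endif
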
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif