/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>

static struct drm_driver driver;
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

#define BDW_COLORS \
	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1, \
	GEN_DEFAULT_PIPEOFFSETS, \
	IVB_CURSOR_OFFSETS

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};

#define VLV_FEATURES  \
	.gen = 7, .num_pipes = 2, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.display_mmio_offset = VLV_DISPLAY_BASE, \
	GEN_DEFAULT_PIPEOFFSETS, \
	CURSOR_OFFSETS

static const struct intel_device_info intel_valleyview_m_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
};

#define HSW_FEATURES  \
	GEN7_FEATURES, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
	.has_ddi = 1, \
	.has_fpga_dbg = 1

static const struct intel_device_info intel_haswell_d_info = {
	HSW_FEATURES,
	.is_haswell = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	HSW_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
};

#define BDW_FEATURES \
	HSW_FEATURES, \
	BDW_COLORS

static const struct intel_device_info intel_broadwell_d_info = {
	BDW_FEATURES,
	.gen = 8,
};

static const struct intel_device_info intel_broadwell_m_info = {
	BDW_FEATURES,
	.gen = 8, .is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	BDW_FEATURES,
	.gen = 8,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	BDW_FEATURES,
	.gen = 8, .is_mobile = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_cherryview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
	CHV_COLORS,
};

static const struct intel_device_info intel_skylake_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_skylake_gt3_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broxton_info = {
	.is_preliminary = 1,
	.is_broxton = 1,
	.gen = 9,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
	BDW_COLORS,
};

static const struct intel_device_info intel_kabylake_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_kabylake_gt3_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
/*
 * Make sure any device matches here are ordered from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_I830_IDS(&intel_i830_info),
	INTEL_I845G_IDS(&intel_845g_info),
	INTEL_I85X_IDS(&intel_i85x_info),
	INTEL_I865G_IDS(&intel_i865g_info),
	INTEL_I915G_IDS(&intel_i915g_info),
	INTEL_I915GM_IDS(&intel_i915gm_info),
	INTEL_I945G_IDS(&intel_i945g_info),
	INTEL_I945GM_IDS(&intel_i945gm_info),
	INTEL_I965G_IDS(&intel_i965g_info),
	INTEL_G33_IDS(&intel_g33_info),
	INTEL_I965GM_IDS(&intel_i965gm_info),
	INTEL_GM45_IDS(&intel_gm45_info),
	INTEL_G45_IDS(&intel_g45_info),
	INTEL_PINEVIEW_IDS(&intel_pineview_info),
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
	INTEL_HSW_D_IDS(&intel_haswell_d_info),
	INTEL_HSW_M_IDS(&intel_haswell_m_info),
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
	INTEL_CHV_IDS(&intel_cherryview_info),
	INTEL_SKL_GT1_IDS(&intel_skylake_info),
	INTEL_SKL_GT2_IDS(&intel_skylake_info),
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
	INTEL_BXT_IDS(&intel_broxton_info),
	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
	{0, 0, 0}
};

MODULE_DEVICE_TABLE(pci, pciidlist);
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for the VMM, which then
	 * only needs to expose the ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor == 0x1af4 &&
				    pch->subsystem_device == 0x1100)) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}
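
/*
 * Semaphores arrived with gen6. The i915.semaphores module parameter can
 * force them on or off; execlists currently disable them, and on SNB they
 * are also disabled when VT-d IO remapping is active for the GPU.
 */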
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
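
/*
 * Report whether the current system sleep transition targets a state
 * shallower than ACPI S3 (i.e. suspend-to-idle/standby). Always false
 * without CONFIG_ACPI_SLEEP.
 */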
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
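
/*
 * First phase of system suspend: quiesce GEM, the display pipeline and
 * interrupts, save the driver state, and notify the opregion/ACPI firmware
 * of the power state the adapter is headed to.
 */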
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers; make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	intel_csr_ucode_suspend(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}
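
/*
 * Late suspend phase: power down the power domains (unless the CSR/DMC
 * firmware will manage them), enter the platform low-power state and put
 * the PCI device into D3hot, with a pre-gen6 hibernation exception noted
 * below.
 */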
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	bool fw_csr;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	fw_csr = !IS_BROXTON(dev_priv) &&
		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);

	ret = 0;
	if (IS_BROXTON(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);

		goto out;
	}

	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}
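
/*
 * Second resume phase, the counterpart of i915_drm_suspend: restore the GTT
 * mappings and saved state, enable interrupts before any batches run, then
 * bring GEM, modeset, hotplug and opregion back up.
 */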
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	disable_rpm_wakeref_asserts(dev_priv);

	intel_csr_ucode_resume(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}
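
/*
 * Early resume phase, the counterpart of i915_drm_suspend_late: bring the
 * PCI device back to D0 and re-enable it, undo the platform low-power state
 * and re-init the power domains unless the DMC firmware kept them up.
 */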
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(dev->pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev)) {
		if (!dev_priv->suspended_to_idle)
			gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev);

	if (IS_BROXTON(dev_priv) ||
	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

	enable_rpm_wakeref_asserts(dev_priv);

out:
	dev_priv->suspended_to_idle = false;

	return ret;
}
int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *  - reset the chip using the reset reg
 *  - re-init context state
 *  - re-init hardware status page
 *  - re-init ring buffer
 *  - re-init interrupt state
 *  - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	unsigned reset_counter;
	int ret;

	intel_reset_gt_powersave(dev);

	mutex_lock(&dev->struct_mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	atomic_andnot(I915_WEDGED, &error->reset_counter);

	/* Clear the reset-in-progress flag and increment the reset epoch. */
	reset_counter = atomic_inc_return(&error->reset_counter);
	if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
		ret = -EIO;
		goto error;
	}

	i915_gem_reset(dev);

	ret = intel_gpu_reset(dev, ALL_ENGINES);

	/* Also reset the gpu hangman. */
	if (error->stop_rings != 0) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		error->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		if (ret != -ENODEV)
			DRM_ERROR("Failed to reset chip: %i\n", ret);
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(dev);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		goto error;
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev);

	return 0;

error:
	atomic_or(I915_WEDGED, &error->reset_counter);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
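
/*
 * PCI probe entry point: reject preliminary hardware unless explicitly
 * enabled, bind only to PCI function 0, and defer probing on dual-GPU Macs
 * until the apple-gmux handler is available, before handing over to the
 * DRM core.
 */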
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
	    apple_gmux_present() && pdev != vga_default_device() &&
	    !vga_switcheroo_handler_flags())
		return -EPROBE_DEFER;

	return drm_get_pci_dev(pdev, ent, &driver);
}
static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
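
/*
 * System-wide PM callbacks (presumably wired up through the driver's
 * dev_pm_ops elsewhere in this file); each is a thin wrapper that skips
 * the work when the GPU has been switched off via vga_switcheroo.
 */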
static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}
static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}
static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}
/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including registers marked with 'Debug'. These
 *   have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,  0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC,  0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bits; they are not
	 * restored here, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_PCBR, s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
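
/*
 * Force the Gunit GFX clock on or off via the survivability register,
 * waiting up to 20 ms for the clock status bit to ack a force-on.
 */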
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}
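
/*
 * Allow or disallow GT wake requests, then wait up to 1 ms for the
 * allow-wake ack bit to reflect the new setting.
 */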
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}
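
/*
 * Wait for the render and media wells to power on or off, allowing up to
 * 3 ms to cover the worst-case RC6 transition delay noted below.
 */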
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      onoff(wait_for_on),
		      I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  onoff(wait_for_on));

	return err;
#undef COND
}
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
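
/*
 * s0ix entry sequence: with the GFX clock forced on and GT wake disabled,
 * save the Gunit state (VLV only; CHV keeps it in hardware), then release
 * the clock force. On failure, re-enable waking and release the clock
 * force before bailing out.
 */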
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well-on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
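
/*
 * s0ix exit sequence, mirroring vlv_suspend_complete: restore the Gunit
 * state with the GFX clock forced on, re-allow GT waking and, on runtime
 * resume, re-init clock gating and restore the GEM fences.
 */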
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}
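
/*
 * Runtime PM suspend: only legal once RPS/RC6 is enabled; backs off with
 * -EAGAIN on struct_mutex contention, quiesces the GPU and interrupts,
 * enters the platform low-power state and notifies the opregion firmware.
 */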
  1216. static int intel_runtime_suspend(struct device *device)
  1217. {
  1218. struct pci_dev *pdev = to_pci_dev(device);
  1219. struct drm_device *dev = pci_get_drvdata(pdev);
  1220. struct drm_i915_private *dev_priv = dev->dev_private;
  1221. int ret;
  1222. if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
  1223. return -ENODEV;
  1224. if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
  1225. return -ENODEV;
  1226. DRM_DEBUG_KMS("Suspending device\n");
  1227. /*
  1228. * We could deadlock here in case another thread holding struct_mutex
  1229. * calls RPM suspend concurrently, since the RPM suspend will wait
  1230. * first for this RPM suspend to finish. In this case the concurrent
  1231. * RPM resume will be followed by its RPM suspend counterpart. Still
  1232. * for consistency return -EAGAIN, which will reschedule this suspend.
  1233. */
  1234. if (!mutex_trylock(&dev->struct_mutex)) {
  1235. DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
  1236. /*
  1237. * Bump the expiration timestamp, otherwise the suspend won't
  1238. * be rescheduled.
  1239. */
  1240. pm_runtime_mark_last_busy(device);
  1241. return -EAGAIN;
  1242. }
  1243. disable_rpm_wakeref_asserts(dev_priv);
  1244. /*
  1245. * We are safe here against re-faults, since the fault handler takes
  1246. * an RPM reference.
  1247. */
  1248. i915_gem_release_all_mmaps(dev_priv);
  1249. mutex_unlock(&dev->struct_mutex);
  1250. cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
  1251. intel_guc_suspend(dev);
  1252. intel_suspend_gt_powersave(dev);
  1253. intel_runtime_pm_disable_interrupts(dev_priv);
  1254. ret = 0;
	if (IS_BROXTON(dev_priv)) {
		bxt_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_enable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_suspend_complete(dev_priv);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	intel_uncore_forcewake_reset(dev, false);

	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
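
/*
 * Counterpart to intel_runtime_suspend(): undoes the platform-specific
 * power-down, then reinitializes swizzling, ring frequencies and
 * interrupts. Errors are reported but not rolled back, so a failed
 * resume leaves runtime PM disabled.
 */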
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev)) {
		bxt_disable_dc9(dev_priv);
		bxt_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload &&
		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_resume_prepare(dev_priv, true);
	}
	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
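
/*
 * Dispatch table for the PM core. System suspend (S3/S0ix) and the
 * hibernation (S4) phases largely reuse the same handlers; only runtime
 * PM gets a dedicated pair. As a sketch of how the runtime pair is
 * reached, code elsewhere in the driver brackets hardware access with:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... MMIO access ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * and the PM core invokes .runtime_suspend below once the last reference
 * is dropped and the autosuspend delay has expired.
 */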
static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
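
/* GEM mmap faults are serviced by i915; vma open/close by DRM helpers. */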
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
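
/*
 * File operations on the DRM device node; everything is delegated to DRM
 * core helpers except the 32-bit compat ioctl translation.
 */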
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
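
/* PCI binding: supported device IDs, probe/remove and the PM ops above. */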
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
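
/*
 * Module entry point. KMS can be controlled from the kernel command line,
 * e.g. booting with "i915.modeset=0" disables it and "i915.modeset=1"
 * forces it on even when vga_text_mode_force is set; the default (-1)
 * leaves the decision to the checks below.
 */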
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * Enable KMS by default, unless explicitly overridden by
	 * either the i915.modeset parameter or by the
	 * vga_text_mode_force boot option.
	 */

	if (i915.modeset == 0)
		driver.driver_features &= ~DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
	}

	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */

	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");