i915_drv.c

  1. /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
  2. */
  3. /*
  4. *
  5. * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  6. * All Rights Reserved.
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a
  9. * copy of this software and associated documentation files (the
  10. * "Software"), to deal in the Software without restriction, including
  11. * without limitation the rights to use, copy, modify, merge, publish,
  12. * distribute, sub license, and/or sell copies of the Software, and to
  13. * permit persons to whom the Software is furnished to do so, subject to
  14. * the following conditions:
  15. *
  16. * The above copyright notice and this permission notice (including the
  17. * next paragraph) shall be included in all copies or substantial portions
  18. * of the Software.
  19. *
  20. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  21. * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  22. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  23. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  24. * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  25. * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  26. * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  27. *
  28. */
  29. #include <linux/device.h>
  30. #include <drm/drmP.h>
  31. #include <drm/i915_drm.h>
  32. #include "i915_drv.h"
  33. #include "i915_trace.h"
  34. #include "intel_drv.h"
  35. #include <linux/console.h>
  36. #include <linux/module.h>
  37. #include <drm/drm_crtc_helper.h>
  38. static struct drm_driver driver;
  39. #define GEN_DEFAULT_PIPEOFFSETS \
  40. .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
  41. PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
  42. .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
  43. TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
  44. .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
  45. .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
  46. .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
  47. static const struct intel_device_info intel_i830_info = {
  48. .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
  49. .has_overlay = 1, .overlay_needs_physical = 1,
  50. .ring_mask = RENDER_RING,
  51. GEN_DEFAULT_PIPEOFFSETS,
  52. };
  53. static const struct intel_device_info intel_845g_info = {
  54. .gen = 2, .num_pipes = 1,
  55. .has_overlay = 1, .overlay_needs_physical = 1,
  56. .ring_mask = RENDER_RING,
  57. GEN_DEFAULT_PIPEOFFSETS,
  58. };
  59. static const struct intel_device_info intel_i85x_info = {
  60. .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
  61. .cursor_needs_physical = 1,
  62. .has_overlay = 1, .overlay_needs_physical = 1,
  63. .has_fbc = 1,
  64. .ring_mask = RENDER_RING,
  65. GEN_DEFAULT_PIPEOFFSETS,
  66. };
  67. static const struct intel_device_info intel_i865g_info = {
  68. .gen = 2, .num_pipes = 1,
  69. .has_overlay = 1, .overlay_needs_physical = 1,
  70. .ring_mask = RENDER_RING,
  71. GEN_DEFAULT_PIPEOFFSETS,
  72. };
  73. static const struct intel_device_info intel_i915g_info = {
  74. .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
  75. .has_overlay = 1, .overlay_needs_physical = 1,
  76. .ring_mask = RENDER_RING,
  77. GEN_DEFAULT_PIPEOFFSETS,
  78. };
  79. static const struct intel_device_info intel_i915gm_info = {
  80. .gen = 3, .is_mobile = 1, .num_pipes = 2,
  81. .cursor_needs_physical = 1,
  82. .has_overlay = 1, .overlay_needs_physical = 1,
  83. .supports_tv = 1,
  84. .has_fbc = 1,
  85. .ring_mask = RENDER_RING,
  86. GEN_DEFAULT_PIPEOFFSETS,
  87. };
  88. static const struct intel_device_info intel_i945g_info = {
  89. .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
  90. .has_overlay = 1, .overlay_needs_physical = 1,
  91. .ring_mask = RENDER_RING,
  92. GEN_DEFAULT_PIPEOFFSETS,
  93. };
  94. static const struct intel_device_info intel_i945gm_info = {
  95. .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
  96. .has_hotplug = 1, .cursor_needs_physical = 1,
  97. .has_overlay = 1, .overlay_needs_physical = 1,
  98. .supports_tv = 1,
  99. .has_fbc = 1,
  100. .ring_mask = RENDER_RING,
  101. GEN_DEFAULT_PIPEOFFSETS,
  102. };
  103. static const struct intel_device_info intel_i965g_info = {
  104. .gen = 4, .is_broadwater = 1, .num_pipes = 2,
  105. .has_hotplug = 1,
  106. .has_overlay = 1,
  107. .ring_mask = RENDER_RING,
  108. GEN_DEFAULT_PIPEOFFSETS,
  109. };
  110. static const struct intel_device_info intel_i965gm_info = {
  111. .gen = 4, .is_crestline = 1, .num_pipes = 2,
  112. .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
  113. .has_overlay = 1,
  114. .supports_tv = 1,
  115. .ring_mask = RENDER_RING,
  116. GEN_DEFAULT_PIPEOFFSETS,
  117. };
  118. static const struct intel_device_info intel_g33_info = {
  119. .gen = 3, .is_g33 = 1, .num_pipes = 2,
  120. .need_gfx_hws = 1, .has_hotplug = 1,
  121. .has_overlay = 1,
  122. .ring_mask = RENDER_RING,
  123. GEN_DEFAULT_PIPEOFFSETS,
  124. };
  125. static const struct intel_device_info intel_g45_info = {
  126. .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
  127. .has_pipe_cxsr = 1, .has_hotplug = 1,
  128. .ring_mask = RENDER_RING | BSD_RING,
  129. GEN_DEFAULT_PIPEOFFSETS,
  130. };
  131. static const struct intel_device_info intel_gm45_info = {
  132. .gen = 4, .is_g4x = 1, .num_pipes = 2,
  133. .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
  134. .has_pipe_cxsr = 1, .has_hotplug = 1,
  135. .supports_tv = 1,
  136. .ring_mask = RENDER_RING | BSD_RING,
  137. GEN_DEFAULT_PIPEOFFSETS,
  138. };
  139. static const struct intel_device_info intel_pineview_info = {
  140. .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
  141. .need_gfx_hws = 1, .has_hotplug = 1,
  142. .has_overlay = 1,
  143. GEN_DEFAULT_PIPEOFFSETS,
  144. };
  145. static const struct intel_device_info intel_ironlake_d_info = {
  146. .gen = 5, .num_pipes = 2,
  147. .need_gfx_hws = 1, .has_hotplug = 1,
  148. .ring_mask = RENDER_RING | BSD_RING,
  149. GEN_DEFAULT_PIPEOFFSETS,
  150. };
  151. static const struct intel_device_info intel_ironlake_m_info = {
  152. .gen = 5, .is_mobile = 1, .num_pipes = 2,
  153. .need_gfx_hws = 1, .has_hotplug = 1,
  154. .has_fbc = 1,
  155. .ring_mask = RENDER_RING | BSD_RING,
  156. GEN_DEFAULT_PIPEOFFSETS,
  157. };
  158. static const struct intel_device_info intel_sandybridge_d_info = {
  159. .gen = 6, .num_pipes = 2,
  160. .need_gfx_hws = 1, .has_hotplug = 1,
  161. .has_fbc = 1,
  162. .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
  163. .has_llc = 1,
  164. GEN_DEFAULT_PIPEOFFSETS,
  165. };
  166. static const struct intel_device_info intel_sandybridge_m_info = {
  167. .gen = 6, .is_mobile = 1, .num_pipes = 2,
  168. .need_gfx_hws = 1, .has_hotplug = 1,
  169. .has_fbc = 1,
  170. .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
  171. .has_llc = 1,
  172. GEN_DEFAULT_PIPEOFFSETS,
  173. };
  174. #define GEN7_FEATURES \
  175. .gen = 7, .num_pipes = 3, \
  176. .need_gfx_hws = 1, .has_hotplug = 1, \
  177. .has_fbc = 1, \
  178. .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
  179. .has_llc = 1
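/*
 * Editorial note: the device info structs below may legally override fields
 * already set by GEN7_FEATURES (or GEN_DEFAULT_PIPEOFFSETS); with C
 * designated initializers the last initializer for a given field wins,
 * which is what the "legal, last one wins" comments further down rely on.
 */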
  180. static const struct intel_device_info intel_ivybridge_d_info = {
  181. GEN7_FEATURES,
  182. .is_ivybridge = 1,
  183. GEN_DEFAULT_PIPEOFFSETS,
  184. };
  185. static const struct intel_device_info intel_ivybridge_m_info = {
  186. GEN7_FEATURES,
  187. .is_ivybridge = 1,
  188. .is_mobile = 1,
  189. GEN_DEFAULT_PIPEOFFSETS,
  190. };
  191. static const struct intel_device_info intel_ivybridge_q_info = {
  192. GEN7_FEATURES,
  193. .is_ivybridge = 1,
  194. .num_pipes = 0, /* legal, last one wins */
  195. GEN_DEFAULT_PIPEOFFSETS,
  196. };
  197. static const struct intel_device_info intel_valleyview_m_info = {
  198. GEN7_FEATURES,
  199. .is_mobile = 1,
  200. .num_pipes = 2,
  201. .is_valleyview = 1,
  202. .display_mmio_offset = VLV_DISPLAY_BASE,
  203. .has_fbc = 0, /* legal, last one wins */
  204. .has_llc = 0, /* legal, last one wins */
  205. GEN_DEFAULT_PIPEOFFSETS,
  206. };
  207. static const struct intel_device_info intel_valleyview_d_info = {
  208. GEN7_FEATURES,
  209. .num_pipes = 2,
  210. .is_valleyview = 1,
  211. .display_mmio_offset = VLV_DISPLAY_BASE,
  212. .has_fbc = 0, /* legal, last one wins */
  213. .has_llc = 0, /* legal, last one wins */
  214. GEN_DEFAULT_PIPEOFFSETS,
  215. };
  216. static const struct intel_device_info intel_haswell_d_info = {
  217. GEN7_FEATURES,
  218. .is_haswell = 1,
  219. .has_ddi = 1,
  220. .has_fpga_dbg = 1,
  221. .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
  222. GEN_DEFAULT_PIPEOFFSETS,
  223. };
  224. static const struct intel_device_info intel_haswell_m_info = {
  225. GEN7_FEATURES,
  226. .is_haswell = 1,
  227. .is_mobile = 1,
  228. .has_ddi = 1,
  229. .has_fpga_dbg = 1,
  230. .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
  231. GEN_DEFAULT_PIPEOFFSETS,
  232. };
  233. static const struct intel_device_info intel_broadwell_d_info = {
  234. .gen = 8, .num_pipes = 3,
  235. .need_gfx_hws = 1, .has_hotplug = 1,
  236. .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
  237. .has_llc = 1,
  238. .has_ddi = 1,
  239. .has_fbc = 1,
  240. GEN_DEFAULT_PIPEOFFSETS,
  241. };
  242. static const struct intel_device_info intel_broadwell_m_info = {
  243. .gen = 8, .is_mobile = 1, .num_pipes = 3,
  244. .need_gfx_hws = 1, .has_hotplug = 1,
  245. .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
  246. .has_llc = 1,
  247. .has_ddi = 1,
  248. .has_fbc = 1,
  249. GEN_DEFAULT_PIPEOFFSETS,
  250. };
  251. static const struct intel_device_info intel_broadwell_gt3d_info = {
  252. .gen = 8, .num_pipes = 3,
  253. .need_gfx_hws = 1, .has_hotplug = 1,
  254. .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
  255. .has_llc = 1,
  256. .has_ddi = 1,
  257. .has_fbc = 1,
  258. GEN_DEFAULT_PIPEOFFSETS,
  259. };
  260. static const struct intel_device_info intel_broadwell_gt3m_info = {
  261. .gen = 8, .is_mobile = 1, .num_pipes = 3,
  262. .need_gfx_hws = 1, .has_hotplug = 1,
  263. .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
  264. .has_llc = 1,
  265. .has_ddi = 1,
  266. .has_fbc = 1,
  267. GEN_DEFAULT_PIPEOFFSETS,
  268. };
  269. /*
  270. * Make sure any device matches here are ordered from most specific to most
  271. * general. For example, since the Quanta match is based on the subsystem
  272. * and subvendor IDs, we need it to come before the more general IVB
  273. * PCI ID matches, otherwise we'll use the wrong info struct above.
  274. */
  275. #define INTEL_PCI_IDS \
  276. INTEL_I830_IDS(&intel_i830_info), \
  277. INTEL_I845G_IDS(&intel_845g_info), \
  278. INTEL_I85X_IDS(&intel_i85x_info), \
  279. INTEL_I865G_IDS(&intel_i865g_info), \
  280. INTEL_I915G_IDS(&intel_i915g_info), \
  281. INTEL_I915GM_IDS(&intel_i915gm_info), \
  282. INTEL_I945G_IDS(&intel_i945g_info), \
  283. INTEL_I945GM_IDS(&intel_i945gm_info), \
  284. INTEL_I965G_IDS(&intel_i965g_info), \
  285. INTEL_G33_IDS(&intel_g33_info), \
  286. INTEL_I965GM_IDS(&intel_i965gm_info), \
  287. INTEL_GM45_IDS(&intel_gm45_info), \
  288. INTEL_G45_IDS(&intel_g45_info), \
  289. INTEL_PINEVIEW_IDS(&intel_pineview_info), \
  290. INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
  291. INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
  292. INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
  293. INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
  294. INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
  295. INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
  296. INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
  297. INTEL_HSW_D_IDS(&intel_haswell_d_info), \
  298. INTEL_HSW_M_IDS(&intel_haswell_m_info), \
  299. INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
  300. INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
  301. INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
  302. INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
  303. INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
  304. INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info)
  305. static const struct pci_device_id pciidlist[] = { /* aka */
  306. INTEL_PCI_IDS,
  307. {0, 0, 0}
  308. };
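/*
 * Editorial note: each INTEL_*_IDS() entry above carries a pointer to the
 * matching intel_device_info struct in its driver_data field (see
 * i915_pci_probe() below, which casts it back), and the zeroed entry
 * terminates the table for the PCI core.
 */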
  309. #if defined(CONFIG_DRM_I915_KMS)
  310. MODULE_DEVICE_TABLE(pci, pciidlist);
  311. #endif
  312. void intel_detect_pch(struct drm_device *dev)
  313. {
  314. struct drm_i915_private *dev_priv = dev->dev_private;
  315. struct pci_dev *pch = NULL;
  316. /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
  317. * (which really amounts to a PCH but no South Display).
  318. */
  319. if (INTEL_INFO(dev)->num_pipes == 0) {
  320. dev_priv->pch_type = PCH_NOP;
  321. return;
  322. }
  323. /*
  324. * We probe the ISA bridge instead of Dev31:Fun0 to make graphics
  325. * device passthrough easier for the VMM, which then only needs to
  326. * expose the ISA bridge so the driver can identify the real hardware
  327. * underneath. This is a requirement from the virtualization team.
  328. *
  329. * In some virtualized environments (e.g. XEN) there is an irrelevant
  330. * ISA bridge in the system. To work reliably, we should scan through
  331. * all the ISA bridge devices and check for the first match, instead
  332. * of only checking the first one.
  333. */
  334. while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
  335. if (pch->vendor == PCI_VENDOR_ID_INTEL) {
  336. unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
  337. dev_priv->pch_id = id;
  338. if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
  339. dev_priv->pch_type = PCH_IBX;
  340. DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
  341. WARN_ON(!IS_GEN5(dev));
  342. } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
  343. dev_priv->pch_type = PCH_CPT;
  344. DRM_DEBUG_KMS("Found CougarPoint PCH\n");
  345. WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
  346. } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
  347. /* PantherPoint is CPT compatible */
  348. dev_priv->pch_type = PCH_CPT;
  349. DRM_DEBUG_KMS("Found PantherPoint PCH\n");
  350. WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
  351. } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
  352. dev_priv->pch_type = PCH_LPT;
  353. DRM_DEBUG_KMS("Found LynxPoint PCH\n");
  354. WARN_ON(!IS_HASWELL(dev));
  355. WARN_ON(IS_ULT(dev));
  356. } else if (IS_BROADWELL(dev)) {
  357. dev_priv->pch_type = PCH_LPT;
  358. dev_priv->pch_id =
  359. INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
  360. DRM_DEBUG_KMS("This is Broadwell, assuming "
  361. "LynxPoint LP PCH\n");
  362. } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
  363. dev_priv->pch_type = PCH_LPT;
  364. DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
  365. WARN_ON(!IS_HASWELL(dev));
  366. WARN_ON(!IS_ULT(dev));
  367. } else
  368. continue;
  369. break;
  370. }
  371. }
  372. if (!pch)
  373. DRM_DEBUG_KMS("No PCH found.\n");
  374. pci_dev_put(pch);
  375. }
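/*
 * Editorial note (hedged): the pch_type set above is normally consumed
 * elsewhere through helpers such as HAS_PCH_IBX()/HAS_PCH_CPT()/
 * HAS_PCH_LPT()/HAS_PCH_NOP() from i915_drv.h, e.g.
 *
 *	if (HAS_PCH_CPT(dev))
 *		... program the CPT/PPT-specific paths ...
 */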
  376. bool i915_semaphore_is_enabled(struct drm_device *dev)
  377. {
  378. if (INTEL_INFO(dev)->gen < 6)
  379. return false;
  380. if (i915.semaphores >= 0)
  381. return i915.semaphores;
  382. /* Until we get further testing... */
  383. if (IS_GEN8(dev))
  384. return false;
  385. #ifdef CONFIG_INTEL_IOMMU
  386. /* Enable semaphores on SNB when IO remapping is off */
  387. if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
  388. return false;
  389. #endif
  390. return true;
  391. }
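/*
 * Editorial note: i915.semaphores is a tri-state module parameter; the
 * "i915.semaphores >= 0" check above returns early only when the user
 * forced it to 0 or 1, while the default negative value falls through
 * to the per-platform heuristics below it.
 */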
  392. static int i915_drm_freeze(struct drm_device *dev)
  393. {
  394. struct drm_i915_private *dev_priv = dev->dev_private;
  395. struct drm_crtc *crtc;
  396. intel_runtime_pm_get(dev_priv);
  397. /* ignore lid events during suspend */
  398. mutex_lock(&dev_priv->modeset_restore_lock);
  399. dev_priv->modeset_restore = MODESET_SUSPENDED;
  400. mutex_unlock(&dev_priv->modeset_restore_lock);
  401. /* We do a lot of poking in a lot of registers, so make sure they
  402. * work properly. */
  403. intel_display_set_init_power(dev_priv, true);
  404. drm_kms_helper_poll_disable(dev);
  405. pci_save_state(dev->pdev);
  406. /* If KMS is active, we do the leavevt stuff here */
  407. if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  408. int error;
  409. error = i915_gem_suspend(dev);
  410. if (error) {
  411. dev_err(&dev->pdev->dev,
  412. "GEM idle failed, resume might fail\n");
  413. return error;
  414. }
  415. cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
  416. drm_irq_uninstall(dev);
  417. dev_priv->enable_hotplug_processing = false;
  418. /*
  419. * Disable CRTCs directly since we want to preserve sw state
  420. * for _thaw.
  421. */
  422. mutex_lock(&dev->mode_config.mutex);
  423. list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
  424. dev_priv->display.crtc_disable(crtc);
  425. mutex_unlock(&dev->mode_config.mutex);
  426. intel_modeset_suspend_hw(dev);
  427. }
  428. i915_gem_suspend_gtt_mappings(dev);
  429. i915_save_state(dev);
  430. intel_opregion_fini(dev);
  431. intel_uncore_fini(dev);
  432. console_lock();
  433. intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
  434. console_unlock();
  435. dev_priv->suspend_count++;
  436. return 0;
  437. }
  438. int i915_suspend(struct drm_device *dev, pm_message_t state)
  439. {
  440. int error;
  441. if (!dev || !dev->dev_private) {
  442. DRM_ERROR("dev: %p\n", dev);
  443. DRM_ERROR("DRM not initialized, aborting suspend.\n");
  444. return -ENODEV;
  445. }
  446. if (state.event == PM_EVENT_PRETHAW)
  447. return 0;
  448. if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
  449. return 0;
  450. error = i915_drm_freeze(dev);
  451. if (error)
  452. return error;
  453. if (state.event == PM_EVENT_SUSPEND) {
  454. /* Shut down the device */
  455. pci_disable_device(dev->pdev);
  456. pci_set_power_state(dev->pdev, PCI_D3hot);
  457. }
  458. return 0;
  459. }
  460. void intel_console_resume(struct work_struct *work)
  461. {
  462. struct drm_i915_private *dev_priv =
  463. container_of(work, struct drm_i915_private,
  464. console_resume_work);
  465. struct drm_device *dev = dev_priv->dev;
  466. console_lock();
  467. intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
  468. console_unlock();
  469. }
  470. static void intel_resume_hotplug(struct drm_device *dev)
  471. {
  472. struct drm_mode_config *mode_config = &dev->mode_config;
  473. struct intel_encoder *encoder;
  474. mutex_lock(&mode_config->mutex);
  475. DRM_DEBUG_KMS("running encoder hotplug functions\n");
  476. list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
  477. if (encoder->hot_plug)
  478. encoder->hot_plug(encoder);
  479. mutex_unlock(&mode_config->mutex);
  480. /* Just fire off a uevent and let userspace tell us what to do */
  481. drm_helper_hpd_irq_event(dev);
  482. }
  483. static int i915_drm_thaw_early(struct drm_device *dev)
  484. {
  485. struct drm_i915_private *dev_priv = dev->dev_private;
  486. intel_uncore_early_sanitize(dev);
  487. intel_uncore_sanitize(dev);
  488. intel_power_domains_init_hw(dev_priv);
  489. return 0;
  490. }
  491. static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
  492. {
  493. struct drm_i915_private *dev_priv = dev->dev_private;
  494. if (drm_core_check_feature(dev, DRIVER_MODESET) &&
  495. restore_gtt_mappings) {
  496. mutex_lock(&dev->struct_mutex);
  497. i915_gem_restore_gtt_mappings(dev);
  498. mutex_unlock(&dev->struct_mutex);
  499. }
  500. i915_restore_state(dev);
  501. intel_opregion_setup(dev);
  502. /* KMS EnterVT equivalent */
  503. if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  504. intel_init_pch_refclk(dev);
  505. drm_mode_config_reset(dev);
  506. mutex_lock(&dev->struct_mutex);
  507. if (i915_gem_init_hw(dev)) {
  508. DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
  509. atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
  510. }
  511. mutex_unlock(&dev->struct_mutex);
  512. /* We need working interrupts for modeset enabling ... */
  513. drm_irq_install(dev, dev->pdev->irq);
  514. intel_modeset_init_hw(dev);
  515. drm_modeset_lock_all(dev);
  516. intel_modeset_setup_hw_state(dev, true);
  517. drm_modeset_unlock_all(dev);
  518. /*
  519. * ... but also need to make sure that hotplug processing
  520. * doesn't cause havoc. Like in the driver load code we don't
  521. * bother with the tiny race here where we might lose hotplug
  522. * notifications.
  523. */
  524. intel_hpd_init(dev);
  525. dev_priv->enable_hotplug_processing = true;
  526. /* Config may have changed between suspend and resume */
  527. intel_resume_hotplug(dev);
  528. }
  529. intel_opregion_init(dev);
  530. /*
  531. * The console lock can be pretty contended on resume due
  532. * to all the printk activity. Try to keep it out of the hot
  533. * path of resume if possible.
  534. */
  535. if (console_trylock()) {
  536. intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
  537. console_unlock();
  538. } else {
  539. schedule_work(&dev_priv->console_resume_work);
  540. }
  541. mutex_lock(&dev_priv->modeset_restore_lock);
  542. dev_priv->modeset_restore = MODESET_DONE;
  543. mutex_unlock(&dev_priv->modeset_restore_lock);
  544. intel_runtime_pm_put(dev_priv);
  545. return 0;
  546. }
  547. static int i915_drm_thaw(struct drm_device *dev)
  548. {
  549. if (drm_core_check_feature(dev, DRIVER_MODESET))
  550. i915_check_and_clear_faults(dev);
  551. return __i915_drm_thaw(dev, true);
  552. }
  553. static int i915_resume_early(struct drm_device *dev)
  554. {
  555. if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
  556. return 0;
  557. /*
  558. * We have a resume ordering issue with the snd-hda driver also
  559. * requiring our device to be powered up. Due to the lack of a
  560. * parent/child relationship we currently solve this with an early
  561. * resume hook.
  562. *
  563. * FIXME: This should be solved with a special hdmi sink device or
  564. * similar so that power domains can be employed.
  565. */
  566. if (pci_enable_device(dev->pdev))
  567. return -EIO;
  568. pci_set_master(dev->pdev);
  569. return i915_drm_thaw_early(dev);
  570. }
  571. int i915_resume(struct drm_device *dev)
  572. {
  573. struct drm_i915_private *dev_priv = dev->dev_private;
  574. int ret;
  575. /*
  576. * Platforms with opregion should have a sane BIOS; older ones (gen3 and
  577. * earlier) need to restore the GTT mappings since the BIOS might clear
  578. * all our scratch PTEs.
  579. */
  580. ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
  581. if (ret)
  582. return ret;
  583. drm_kms_helper_poll_enable(dev);
  584. return 0;
  585. }
  586. static int i915_resume_legacy(struct drm_device *dev)
  587. {
  588. i915_resume_early(dev);
  589. i915_resume(dev);
  590. return 0;
  591. }
  592. /**
  593. * i915_reset - reset chip after a hang
  594. * @dev: drm device to reset
  595. *
  596. * Reset the chip. Useful if a hang is detected. Returns zero on successful
  597. * reset or otherwise an error code.
  598. *
  599. * Procedure is fairly simple:
  600. * - reset the chip using the reset reg
  601. * - re-init context state
  602. * - re-init hardware status page
  603. * - re-init ring buffer
  604. * - re-init interrupt state
  605. * - re-init display
  606. */
  607. int i915_reset(struct drm_device *dev)
  608. {
  609. struct drm_i915_private *dev_priv = dev->dev_private;
  610. bool simulated;
  611. int ret;
  612. if (!i915.reset)
  613. return 0;
  614. mutex_lock(&dev->struct_mutex);
  615. i915_gem_reset(dev);
  616. simulated = dev_priv->gpu_error.stop_rings != 0;
  617. ret = intel_gpu_reset(dev);
  618. /* Also reset the gpu hangman. */
  619. if (simulated) {
  620. DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
  621. dev_priv->gpu_error.stop_rings = 0;
  622. if (ret == -ENODEV) {
  623. DRM_INFO("Reset not implemented, but ignoring "
  624. "error for simulated gpu hangs\n");
  625. ret = 0;
  626. }
  627. }
  628. if (ret) {
  629. DRM_ERROR("Failed to reset chip: %i\n", ret);
  630. mutex_unlock(&dev->struct_mutex);
  631. return ret;
  632. }
  633. /* Ok, now get things going again... */
  634. /*
  635. * Everything depends on having the GTT running, so we need to start
  636. * there. Fortunately we don't need to do this unless we reset the
  637. * chip at a PCI level.
  638. *
  639. * Next we need to restore the context, but we don't use those
  640. * yet either...
  641. *
  642. * Ring buffer needs to be re-initialized in the KMS case, or if X
  643. * was running at the time of the reset (i.e. we weren't VT
  644. * switched away).
  645. */
  646. if (drm_core_check_feature(dev, DRIVER_MODESET) ||
  647. !dev_priv->ums.mm_suspended) {
  648. dev_priv->ums.mm_suspended = 0;
  649. ret = i915_gem_init_hw(dev);
  650. mutex_unlock(&dev->struct_mutex);
  651. if (ret) {
  652. DRM_ERROR("Failed hw init on reset %d\n", ret);
  653. return ret;
  654. }
  655. /*
  656. * FIXME: This is horribly racy against concurrent pageflip and
  657. * vblank wait ioctls since they can observe dev->irqs_disabled
  658. * being false when they shouldn't be able to.
  659. */
  660. drm_irq_uninstall(dev);
  661. drm_irq_install(dev, dev->pdev->irq);
  662. /* rps/rc6 re-init is necessary to restore state lost after the
  663. * reset and the re-install of drm irq. Skip for ironlake per
  664. * previous concerns that it doesn't respond well to some forms
  665. * of re-init after reset. */
  666. if (INTEL_INFO(dev)->gen > 5)
  667. intel_reset_gt_powersave(dev);
  668. intel_hpd_init(dev);
  669. } else {
  670. mutex_unlock(&dev->struct_mutex);
  671. }
  672. return 0;
  673. }
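/*
 * Editorial note (hedged): nothing in this file calls i915_reset(); in
 * this era it is driven from the error/hangcheck handling in i915_irq.c
 * once a GPU hang has been detected, after userspace has been notified
 * via a uevent.
 */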
  674. static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  675. {
  676. struct intel_device_info *intel_info =
  677. (struct intel_device_info *) ent->driver_data;
  678. if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
  679. DRM_INFO("This hardware requires preliminary hardware support.\n"
  680. "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
  681. return -ENODEV;
  682. }
  683. /* Only bind to function 0 of the device. Early generations
  684. * used function 1 as a placeholder for multi-head. Nowadays this only
  685. * causes confusion, especially on systems where both
  686. * functions have the same PCI-ID!
  687. */
  688. if (PCI_FUNC(pdev->devfn))
  689. return -ENODEV;
  690. driver.driver_features &= ~(DRIVER_USE_AGP);
  691. return drm_get_pci_dev(pdev, ent, &driver);
  692. }
  693. static void
  694. i915_pci_remove(struct pci_dev *pdev)
  695. {
  696. struct drm_device *dev = pci_get_drvdata(pdev);
  697. drm_put_dev(dev);
  698. }
  699. static int i915_pm_suspend(struct device *dev)
  700. {
  701. struct pci_dev *pdev = to_pci_dev(dev);
  702. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  703. if (!drm_dev || !drm_dev->dev_private) {
  704. dev_err(dev, "DRM not initialized, aborting suspend.\n");
  705. return -ENODEV;
  706. }
  707. if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
  708. return 0;
  709. return i915_drm_freeze(drm_dev);
  710. }
  711. static int i915_pm_suspend_late(struct device *dev)
  712. {
  713. struct pci_dev *pdev = to_pci_dev(dev);
  714. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  715. /*
  716. * We have a suspend ordering issue with the snd-hda driver also
  717. * requiring our device to be powered up. Due to the lack of a
  718. * parent/child relationship we currently solve this with a late
  719. * suspend hook.
  720. *
  721. * FIXME: This should be solved with a special hdmi sink device or
  722. * similar so that power domains can be employed.
  723. */
  724. if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
  725. return 0;
  726. pci_disable_device(pdev);
  727. pci_set_power_state(pdev, PCI_D3hot);
  728. return 0;
  729. }
  730. static int i915_pm_resume_early(struct device *dev)
  731. {
  732. struct pci_dev *pdev = to_pci_dev(dev);
  733. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  734. return i915_resume_early(drm_dev);
  735. }
  736. static int i915_pm_resume(struct device *dev)
  737. {
  738. struct pci_dev *pdev = to_pci_dev(dev);
  739. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  740. return i915_resume(drm_dev);
  741. }
  742. static int i915_pm_freeze(struct device *dev)
  743. {
  744. struct pci_dev *pdev = to_pci_dev(dev);
  745. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  746. if (!drm_dev || !drm_dev->dev_private) {
  747. dev_err(dev, "DRM not initialized, aborting suspend.\n");
  748. return -ENODEV;
  749. }
  750. return i915_drm_freeze(drm_dev);
  751. }
  752. static int i915_pm_thaw_early(struct device *dev)
  753. {
  754. struct pci_dev *pdev = to_pci_dev(dev);
  755. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  756. return i915_drm_thaw_early(drm_dev);
  757. }
  758. static int i915_pm_thaw(struct device *dev)
  759. {
  760. struct pci_dev *pdev = to_pci_dev(dev);
  761. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  762. return i915_drm_thaw(drm_dev);
  763. }
  764. static int i915_pm_poweroff(struct device *dev)
  765. {
  766. struct pci_dev *pdev = to_pci_dev(dev);
  767. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  768. return i915_drm_freeze(drm_dev);
  769. }
  770. static void hsw_runtime_suspend(struct drm_i915_private *dev_priv)
  771. {
  772. hsw_enable_pc8(dev_priv);
  773. }
  774. static void snb_runtime_resume(struct drm_i915_private *dev_priv)
  775. {
  776. struct drm_device *dev = dev_priv->dev;
  777. intel_init_pch_refclk(dev);
  778. }
  779. static void hsw_runtime_resume(struct drm_i915_private *dev_priv)
  780. {
  781. hsw_disable_pc8(dev_priv);
  782. }
  783. int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
  784. {
  785. u32 val;
  786. int err;
  787. val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
  788. WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
  789. #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
  790. /* Wait for a previous force-off to settle */
  791. if (force_on) {
  792. err = wait_for(!COND, 20);
  793. if (err) {
  794. DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
  795. I915_READ(VLV_GTLC_SURVIVABILITY_REG));
  796. return err;
  797. }
  798. }
  799. val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
  800. val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
  801. if (force_on)
  802. val |= VLV_GFX_CLK_FORCE_ON_BIT;
  803. I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
  804. if (!force_on)
  805. return 0;
  806. err = wait_for(COND, 20);
  807. if (err)
  808. DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
  809. I915_READ(VLV_GTLC_SURVIVABILITY_REG));
  810. return err;
  811. #undef COND
  812. }
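/*
 * Editorial sketch (hedged, names illustrative): a caller that needs the
 * GT register file to stay accessible while it saves or restores Gunit
 * state would bracket the access roughly like this:
 *
 *	err = vlv_force_gfx_clock(dev_priv, true);
 *	if (err)
 *		return err;
 *	... save/restore Gunit registers ...
 *	err = vlv_force_gfx_clock(dev_priv, false);
 */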
  813. static int intel_runtime_suspend(struct device *device)
  814. {
  815. struct pci_dev *pdev = to_pci_dev(device);
  816. struct drm_device *dev = pci_get_drvdata(pdev);
  817. struct drm_i915_private *dev_priv = dev->dev_private;
  818. if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
  819. return -ENODEV;
  820. WARN_ON(!HAS_RUNTIME_PM(dev));
  821. assert_force_wake_inactive(dev_priv);
  822. DRM_DEBUG_KMS("Suspending device\n");
  823. /*
  824. * rps.work can't be rearmed here, since we get here only after making
  825. * sure the GPU is idle and the RPS freq is set to the minimum. See
  826. * intel_mark_idle().
  827. */
  828. cancel_work_sync(&dev_priv->rps.work);
  829. intel_runtime_pm_disable_interrupts(dev);
  830. if (IS_GEN6(dev))
  831. ;
  832. else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  833. hsw_runtime_suspend(dev_priv);
  834. else
  835. WARN_ON(1);
  836. i915_gem_release_all_mmaps(dev_priv);
  837. del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
  838. dev_priv->pm.suspended = true;
  839. /*
  840. * Current versions of firmware which depend on this opregion
  841. * notification have repurposed the D1 definition to mean
  842. * "runtime suspended" rather than what you would normally expect
  843. * (D3), to distinguish it from notifications that might be sent
  844. * via the suspend path.
  845. */
  846. intel_opregion_notify_adapter(dev, PCI_D1);
  847. DRM_DEBUG_KMS("Device suspended\n");
  848. return 0;
  849. }
  850. static int intel_runtime_resume(struct device *device)
  851. {
  852. struct pci_dev *pdev = to_pci_dev(device);
  853. struct drm_device *dev = pci_get_drvdata(pdev);
  854. struct drm_i915_private *dev_priv = dev->dev_private;
  855. WARN_ON(!HAS_RUNTIME_PM(dev));
  856. DRM_DEBUG_KMS("Resuming device\n");
  857. intel_opregion_notify_adapter(dev, PCI_D0);
  858. dev_priv->pm.suspended = false;
  859. if (IS_GEN6(dev))
  860. snb_runtime_resume(dev_priv);
  861. else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  862. hsw_runtime_resume(dev_priv);
  863. else
  864. WARN_ON(1);
  865. i915_gem_init_swizzling(dev);
  866. gen6_update_ring_freq(dev);
  867. intel_runtime_pm_restore_interrupts(dev);
  868. intel_reset_gt_powersave(dev);
  869. DRM_DEBUG_KMS("Device resumed\n");
  870. return 0;
  871. }
  872. static const struct dev_pm_ops i915_pm_ops = {
  873. .suspend = i915_pm_suspend,
  874. .suspend_late = i915_pm_suspend_late,
  875. .resume_early = i915_pm_resume_early,
  876. .resume = i915_pm_resume,
  877. .freeze = i915_pm_freeze,
  878. .thaw_early = i915_pm_thaw_early,
  879. .thaw = i915_pm_thaw,
  880. .poweroff = i915_pm_poweroff,
  881. .restore_early = i915_pm_resume_early,
  882. .restore = i915_pm_resume,
  883. .runtime_suspend = intel_runtime_suspend,
  884. .runtime_resume = intel_runtime_resume,
  885. };
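/*
 * Editorial note: the callbacks above follow the usual dev_pm_ops
 * pairing -- suspend/suspend_late with resume_early/resume for
 * suspend-to-RAM, freeze/thaw_early/thaw and poweroff/restore_early/
 * restore for hibernation, plus runtime_suspend/runtime_resume for
 * runtime PM -- so the same freeze/thaw helpers serve several phases.
 */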
  886. static const struct vm_operations_struct i915_gem_vm_ops = {
  887. .fault = i915_gem_fault,
  888. .open = drm_gem_vm_open,
  889. .close = drm_gem_vm_close,
  890. };
  891. static const struct file_operations i915_driver_fops = {
  892. .owner = THIS_MODULE,
  893. .open = drm_open,
  894. .release = drm_release,
  895. .unlocked_ioctl = drm_ioctl,
  896. .mmap = drm_gem_mmap,
  897. .poll = drm_poll,
  898. .read = drm_read,
  899. #ifdef CONFIG_COMPAT
  900. .compat_ioctl = i915_compat_ioctl,
  901. #endif
  902. .llseek = noop_llseek,
  903. };
  904. static struct drm_driver driver = {
  905. /* Don't use MTRRs here; the Xserver or userspace app should
  906. * deal with them for Intel hardware.
  907. */
  908. .driver_features =
  909. DRIVER_USE_AGP |
  910. DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
  911. DRIVER_RENDER,
  912. .load = i915_driver_load,
  913. .unload = i915_driver_unload,
  914. .open = i915_driver_open,
  915. .lastclose = i915_driver_lastclose,
  916. .preclose = i915_driver_preclose,
  917. .postclose = i915_driver_postclose,
  918. /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
  919. .suspend = i915_suspend,
  920. .resume = i915_resume_legacy,
  921. .device_is_agp = i915_driver_device_is_agp,
  922. .master_create = i915_master_create,
  923. .master_destroy = i915_master_destroy,
  924. #if defined(CONFIG_DEBUG_FS)
  925. .debugfs_init = i915_debugfs_init,
  926. .debugfs_cleanup = i915_debugfs_cleanup,
  927. #endif
  928. .gem_free_object = i915_gem_free_object,
  929. .gem_vm_ops = &i915_gem_vm_ops,
  930. .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
  931. .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
  932. .gem_prime_export = i915_gem_prime_export,
  933. .gem_prime_import = i915_gem_prime_import,
  934. .dumb_create = i915_gem_dumb_create,
  935. .dumb_map_offset = i915_gem_mmap_gtt,
  936. .dumb_destroy = drm_gem_dumb_destroy,
  937. .ioctls = i915_ioctls,
  938. .fops = &i915_driver_fops,
  939. .name = DRIVER_NAME,
  940. .desc = DRIVER_DESC,
  941. .date = DRIVER_DATE,
  942. .major = DRIVER_MAJOR,
  943. .minor = DRIVER_MINOR,
  944. .patchlevel = DRIVER_PATCHLEVEL,
  945. };
  946. static struct pci_driver i915_pci_driver = {
  947. .name = DRIVER_NAME,
  948. .id_table = pciidlist,
  949. .probe = i915_pci_probe,
  950. .remove = i915_pci_remove,
  951. .driver.pm = &i915_pm_ops,
  952. };
  953. static int __init i915_init(void)
  954. {
  955. driver.num_ioctls = i915_max_ioctl;
  956. /*
  957. * If CONFIG_DRM_I915_KMS is set, default to KMS unless
  958. * explicitly disabled with the module parameter.
  959. *
  960. * Otherwise, just follow the parameter (defaulting to off).
  961. *
  962. * Allow optional vga_text_mode_force boot option to override
  963. * the default behavior.
  964. */
  965. #if defined(CONFIG_DRM_I915_KMS)
  966. if (i915.modeset != 0)
  967. driver.driver_features |= DRIVER_MODESET;
  968. #endif
  969. if (i915.modeset == 1)
  970. driver.driver_features |= DRIVER_MODESET;
  971. #ifdef CONFIG_VGA_CONSOLE
  972. if (vgacon_text_force() && i915.modeset == -1)
  973. driver.driver_features &= ~DRIVER_MODESET;
  974. #endif
  975. if (!(driver.driver_features & DRIVER_MODESET)) {
  976. driver.get_vblank_timestamp = NULL;
  977. #ifndef CONFIG_DRM_I915_UMS
  978. /* Silently fail loading to not upset userspace. */
  979. return 0;
  980. #endif
  981. }
  982. return drm_pci_init(&driver, &i915_pci_driver);
  983. }
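/*
 * Editorial note: i915.modeset is a tri-state parameter (-1 = auto,
 * 0 = off, 1 = on), which is why the CONFIG_DRM_I915_KMS build treats
 * any non-zero value as "enable KMS" while the generic check above only
 * honours an explicit 1, and vga_text_mode_force only overrides the
 * auto (-1) case.
 */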
  984. static void __exit i915_exit(void)
  985. {
  986. #ifndef CONFIG_DRM_I915_UMS
  987. if (!(driver.driver_features & DRIVER_MODESET))
  988. return; /* Never loaded a driver. */
  989. #endif
  990. drm_pci_exit(&driver, &i915_pci_driver);
  991. }
  992. module_init(i915_init);
  993. module_exit(i915_exit);
  994. MODULE_AUTHOR(DRIVER_AUTHOR);
  995. MODULE_DESCRIPTION(DRIVER_DESC);
  996. MODULE_LICENSE("GPL and additional rights");