/* nouveau_drm.c — nouveau DRM driver core (module init, probe, PM ops). */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
  24. #include <linux/console.h>
  25. #include <linux/delay.h>
  26. #include <linux/module.h>
  27. #include <linux/pci.h>
  28. #include <linux/pm_runtime.h>
  29. #include <linux/vga_switcheroo.h>
  30. #include <drm/drmP.h>
  31. #include <drm/drm_crtc_helper.h>
  32. #include <core/gpuobj.h>
  33. #include <core/option.h>
  34. #include <core/pci.h>
  35. #include <core/tegra.h>
  36. #include <nvif/driver.h>
  37. #include <nvif/fifo.h>
  38. #include <nvif/user.h>
  39. #include <nvif/class.h>
  40. #include <nvif/cl0002.h>
  41. #include <nvif/cla06f.h>
  42. #include <nvif/if0004.h>
  43. #include "nouveau_drv.h"
  44. #include "nouveau_dma.h"
  45. #include "nouveau_ttm.h"
  46. #include "nouveau_gem.h"
  47. #include "nouveau_vga.h"
  48. #include "nouveau_led.h"
  49. #include "nouveau_hwmon.h"
  50. #include "nouveau_acpi.h"
  51. #include "nouveau_bios.h"
  52. #include "nouveau_ioctl.h"
  53. #include "nouveau_abi16.h"
  54. #include "nouveau_fbcon.h"
  55. #include "nouveau_fence.h"
  56. #include "nouveau_debugfs.h"
  57. #include "nouveau_usif.h"
  58. #include "nouveau_connector.h"
  59. #include "nouveau_platform.h"
/* Module parameters (all 0400: root-readable, set at load time only). */

/* Option string forwarded verbatim to the nvkm device core. */
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);

/* Debug-level string forwarded to the nvkm device core. */
MODULE_PARM_DESC(debug, "debug string to pass to driver core");
static char *nouveau_debug;
module_param_named(debug, nouveau_debug, charp, 0400);

/* Non-zero disables channel/fence setup in nouveau_accel_init(). */
MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
static int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);

MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
		          "0 = disabled, 1 = enabled, 2 = headless)");
int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);

/* When set, DRIVER_ATOMIC is OR'd into driver_pci at probe time. */
MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
static int nouveau_atomic = 0;
module_param_named(atomic, nouveau_atomic, int, 0400);

/* Runtime PM policy; -1 = auto (Optimus/_DSM only), see nouveau_pmops_runtime(). */
MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
static int nouveau_runtime_pm = -1;
module_param_named(runpm, nouveau_runtime_pm, int, 0400);

/* driver_stub is cloned into the bus-specific variants at module init. */
static struct drm_driver driver_stub;
static struct drm_driver driver_pci;
static struct drm_driver driver_platform;
  82. static u64
  83. nouveau_pci_name(struct pci_dev *pdev)
  84. {
  85. u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
  86. name |= pdev->bus->number << 16;
  87. name |= PCI_SLOT(pdev->devfn) << 8;
  88. return name | PCI_FUNC(pdev->devfn);
  89. }
/* Build a unique 64-bit client name for a platform device (its bus id). */
static u64
nouveau_platform_name(struct platform_device *platformdev)
{
	return platformdev->id;
}
  95. static u64
  96. nouveau_name(struct drm_device *dev)
  97. {
  98. if (dev->pdev)
  99. return nouveau_pci_name(dev->pdev);
  100. else
  101. return nouveau_platform_name(to_platform_device(dev->dev));
  102. }
  103. static inline bool
  104. nouveau_cli_work_ready(struct dma_fence *fence)
  105. {
  106. if (!dma_fence_is_signaled(fence))
  107. return false;
  108. dma_fence_put(fence);
  109. return true;
  110. }
  111. static void
  112. nouveau_cli_work(struct work_struct *w)
  113. {
  114. struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
  115. struct nouveau_cli_work *work, *wtmp;
  116. mutex_lock(&cli->lock);
  117. list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
  118. if (!work->fence || nouveau_cli_work_ready(work->fence)) {
  119. list_del(&work->head);
  120. work->func(work);
  121. }
  122. }
  123. mutex_unlock(&cli->lock);
  124. }
/* Fence callback: kick the client's workqueue.  The actual list walk is
 * deferred to process context (nouveau_cli_work()) because it needs to
 * take cli->lock, which cannot be done from fence-signalling context.
 */
static void
nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
	schedule_work(&work->cli->work);
}
/* Queue @work to be executed (in process context) once @fence signals.
 *
 * The item is added to the client's pending list before the fence
 * callback is installed.  If dma_fence_add_callback() fails, the fence
 * has already signalled, so the callback is invoked by hand to ensure
 * the work is never lost.  The fence reference taken here is dropped in
 * nouveau_cli_work_ready() when the item is finally executed.
 */
void
nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
		       struct nouveau_cli_work *work)
{
	work->fence = dma_fence_get(fence);
	work->cli = cli;
	mutex_lock(&cli->lock);
	list_add_tail(&work->head, &cli->worker);
	if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
		nouveau_cli_work_fence(fence, &work->cb);
	mutex_unlock(&cli->lock);
}
/* Tear down a client, releasing resources in reverse order of
 * nouveau_cli_init().  The master lock serialises nvif_client_fini()
 * against concurrent client creation, which also runs under it.
 */
static void
nouveau_cli_fini(struct nouveau_cli *cli)
{
	/* All our channels are dead now, which means all the fences they
	 * own are signalled, and all callback functions have been called.
	 *
	 * So, after flushing the workqueue, there should be nothing left.
	 */
	flush_work(&cli->work);
	WARN_ON(!list_empty(&cli->worker));

	usif_client_fini(cli);
	nouveau_vmm_fini(&cli->vmm);
	nvif_mmu_fini(&cli->mmu);
	nvif_device_fini(&cli->device);
	mutex_lock(&cli->drm->master.lock);
	nvif_client_fini(&cli->base);
	mutex_unlock(&cli->drm->master.lock);
}
/* Initialise a client: nvif client + device objects, then the highest
 * supported MMU, VMM and MEM classes for the hardware.
 *
 * @sname: human-readable client name (copied into cli->name).
 * Returns 0 on success or a negative error code; on failure everything
 * already created is torn down via nouveau_cli_fini().
 *
 * The class tables below are ordered newest-first; nvif_mclass()
 * returns the index of the first entry the device supports.
 */
static int
nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
		 struct nouveau_cli *cli)
{
	static const struct nvif_mclass
	mems[] = {
		{ NVIF_CLASS_MEM_GF100, -1 },
		{ NVIF_CLASS_MEM_NV50 , -1 },
		{ NVIF_CLASS_MEM_NV04 , -1 },
		{}
	};
	static const struct nvif_mclass
	mmus[] = {
		{ NVIF_CLASS_MMU_GF100, -1 },
		{ NVIF_CLASS_MMU_NV50 , -1 },
		{ NVIF_CLASS_MMU_NV04 , -1 },
		{}
	};
	static const struct nvif_mclass
	vmms[] = {
		{ NVIF_CLASS_VMM_GP100, -1 },
		{ NVIF_CLASS_VMM_GM200, -1 },
		{ NVIF_CLASS_VMM_GF100, -1 },
		{ NVIF_CLASS_VMM_NV50 , -1 },
		{ NVIF_CLASS_VMM_NV04 , -1 },
		{}
	};
	u64 device = nouveau_name(drm->dev);
	int ret;

	snprintf(cli->name, sizeof(cli->name), "%s", sname);
	cli->drm = drm;
	mutex_init(&cli->mutex);
	usif_client_init(cli);

	INIT_WORK(&cli->work, nouveau_cli_work);
	INIT_LIST_HEAD(&cli->worker);
	mutex_init(&cli->lock);

	/* The master client talks to the driver core directly; all other
	 * clients are children of the master and share its lock.
	 */
	if (cli == &drm->master) {
		ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
				       cli->name, device, &cli->base);
	} else {
		mutex_lock(&drm->master.lock);
		ret = nvif_client_init(&drm->master.base, cli->name, device,
				       &cli->base);
		mutex_unlock(&drm->master.lock);
	}
	if (ret) {
		NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
		goto done;
	}

	ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE,
			       &(struct nv_device_v0) {
					.device = ~0,	/* any device owned by this client */
			       }, sizeof(struct nv_device_v0),
			       &cli->device);
	if (ret) {
		NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
		goto done;
	}

	ret = nvif_mclass(&cli->device.object, mmus);
	if (ret < 0) {
		NV_PRINTK(err, cli, "No supported MMU class\n");
		goto done;
	}

	ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
	if (ret) {
		NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
		goto done;
	}

	ret = nvif_mclass(&cli->mmu.object, vmms);
	if (ret < 0) {
		NV_PRINTK(err, cli, "No supported VMM class\n");
		goto done;
	}

	ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
	if (ret) {
		NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
		goto done;
	}

	ret = nvif_mclass(&cli->mmu.object, mems);
	if (ret < 0) {
		NV_PRINTK(err, cli, "No supported MEM class\n");
		goto done;
	}

	cli->mem = &mems[ret];
	return 0;
done:
	if (ret)
		nouveau_cli_fini(cli);
	return ret;
}
/* Tear down acceleration state built by nouveau_accel_init().
 *
 * Also used as the error-unwind path of nouveau_accel_init(), so every
 * call here must be safe on objects that were never initialised.
 * Channels are idled before their objects are destroyed.
 */
static void
nouveau_accel_fini(struct nouveau_drm *drm)
{
	nouveau_channel_idle(drm->channel);
	nvif_object_fini(&drm->ntfy);
	nvkm_gpuobj_del(&drm->notify);
	nvif_notify_fini(&drm->flip);
	nvif_object_fini(&drm->nvsw);
	nouveau_channel_del(&drm->channel);

	nouveau_channel_idle(drm->cechan);
	nvif_object_fini(&drm->ttm.copy);
	nouveau_channel_del(&drm->cechan);

	if (drm->fence)
		nouveau_fence(drm)->dtor(drm);
}
/* Bring up GPU acceleration: the fence implementation for the chipset,
 * an optional copy-engine channel, the kernel channel, and per-family
 * support objects (NV04 software class / pre-Fermi notifier).
 *
 * Failure here is non-fatal to the driver: on any error the partial
 * state is torn down via nouveau_accel_fini() and the function returns,
 * leaving the device running unaccelerated.
 */
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->client.device;
	struct nvif_sclass *sclass;
	u32 arg0, arg1;
	int ret, i, n;

	if (nouveau_noaccel)
		return;

	ret = nouveau_channels_init(drm);
	if (ret)
		return;

	/* Volta and newer need a usermode object for channel doorbells. */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
		ret = nvif_user_init(device);
		if (ret)
			return;
	}

	/* initialise synchronisation routines */
	/*XXX: this is crap, but the fence/channel stuff is a little
	 *     backwards in some places.  this will be fixed.
	 */
	ret = n = nvif_object_sclass_get(&device->object, &sclass);
	if (ret < 0)
		return;

	/* Pick the fence implementation from the newest channel class the
	 * device exposes; ret stays -ENOSYS if none is recognised.
	 */
	for (ret = -ENOSYS, i = 0; i < n; i++) {
		switch (sclass[i].oclass) {
		case NV03_CHANNEL_DMA:
			ret = nv04_fence_create(drm);
			break;
		case NV10_CHANNEL_DMA:
			ret = nv10_fence_create(drm);
			break;
		case NV17_CHANNEL_DMA:
		case NV40_CHANNEL_DMA:
			ret = nv17_fence_create(drm);
			break;
		case NV50_CHANNEL_GPFIFO:
			ret = nv50_fence_create(drm);
			break;
		case G82_CHANNEL_GPFIFO:
			ret = nv84_fence_create(drm);
			break;
		case FERMI_CHANNEL_GPFIFO:
		case KEPLER_CHANNEL_GPFIFO_A:
		case KEPLER_CHANNEL_GPFIFO_B:
		case MAXWELL_CHANNEL_GPFIFO_A:
		case PASCAL_CHANNEL_GPFIFO_A:
		case VOLTA_CHANNEL_GPFIFO_A:
			ret = nvc0_fence_create(drm);
			break;
		default:
			break;
		}
	}

	nvif_object_sclass_put(&sclass);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	/* Create a dedicated copy-engine channel where the hardware has
	 * one (Kepler+ via runlists, or select Tesla chipsets); a CE
	 * channel failure is logged but not fatal.  arg0/arg1 select the
	 * engines/dma objects for the kernel channel created below.
	 */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		ret = nouveau_channel_new(drm, &drm->client.device,
					  nvif_fifo_runlist_ce(device), 0,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
		arg1 = 1;
	} else
	if (device->info.chipset >= 0xa3 &&
	    device->info.chipset != 0xaa &&
	    device->info.chipset != 0xac) {
		ret = nouveau_channel_new(drm, &drm->client.device,
					  NvDmaFB, NvDmaTT, &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->client.device,
				  arg0, arg1, &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	/* Pre-Tesla: bind the software class and its page-flip-complete
	 * notifier.  Note ret is overwritten by nvif_notify_init(), so a
	 * RING_SPACE() failure only skips emitting the bind method.
	 */
	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
				       nouveau_abi16_swclass(drm), NULL, 0,
				       &drm->nvsw);
		if (ret == 0) {
			ret = RING_SPACE(drm->channel, 2);
			if (ret == 0) {
				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
				OUT_RING (drm->channel, drm->nvsw.handle);
			}

			ret = nvif_notify_init(&drm->nvsw,
					       nouveau_flip_complete,
					       false, NV04_NVSW_NTFY_UEVENT,
					       NULL, 0, 0, &drm->flip);
			if (ret == 0)
				ret = nvif_notify_get(&drm->flip);
			if (ret) {
				nouveau_accel_fini(drm);
				return;
			}
		}

		if (ret) {
			NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}
	}

	/* Pre-Fermi: allocate a 32-byte VRAM notifier and a DMA object
	 * covering it for sync/query use.
	 */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0,
				      false, NULL, &drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nvif_object_init(&drm->channel->user, NvNotify0,
				       NV_DMA_IN_MEMORY,
				       &(struct nv_dma_v0) {
						.target = NV_DMA_V0_TARGET_VRAM,
						.access = NV_DMA_V0_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
				       }, sizeof(struct nv_dma_v0),
				       &drm->ntfy);
		if (ret) {
			nouveau_accel_fini(drm);
			return;
		}
	}

	nouveau_bo_move_init(drm);
}
/* Allocate and initialise the nouveau_drm state for @dev: master and
 * DRM clients, VGA, TTM, BIOS, display, debugfs/hwmon, acceleration,
 * fbcon, LEDs, and (on dual-GPU systems) runtime PM.
 *
 * On failure the goto ladder unwinds in reverse order of construction.
 * Returns 0 or a negative error code.
 */
static int
nouveau_drm_device_init(struct drm_device *dev)
{
	struct nouveau_drm *drm;
	int ret;

	if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
		return -ENOMEM;
	dev->dev_private = drm;
	drm->dev = dev;

	ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
	if (ret)
		goto fail_alloc;

	ret = nouveau_cli_init(drm, "DRM", &drm->client);
	if (ret)
		goto fail_master;

	dev->irq_enabled = true;

	nvxx_client(&drm->client.base)->debug =
		nvkm_dbgopt(nouveau_debug, "DRM");

	INIT_LIST_HEAD(&drm->clients);
	spin_lock_init(&drm->tile.lock);

	/* workaround an odd issue on nvc1 by disabling the device's
	 * nosnoop capability.  hopefully won't cause issues until a
	 * better fix is found - assuming there is one...
	 */
	if (drm->client.device.info.chipset == 0xc1)
		nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000);

	nouveau_vga_init(drm);

	ret = nouveau_ttm_init(drm);
	if (ret)
		goto fail_ttm;

	ret = nouveau_bios_init(dev);
	if (ret)
		goto fail_bios;

	ret = nouveau_display_create(dev);
	if (ret)
		goto fail_dispctor;

	/* Headless configurations skip display init entirely. */
	if (dev->mode_config.num_crtc) {
		ret = nouveau_display_init(dev);
		if (ret)
			goto fail_dispinit;
	}

	nouveau_debugfs_init(drm);
	nouveau_hwmon_init(dev);
	nouveau_accel_init(drm);
	nouveau_fbcon_init(dev);
	nouveau_led_init(dev);

	if (nouveau_pmops_runtime()) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put(dev->dev);
	}

	return 0;

fail_dispinit:
	nouveau_display_destroy(dev);
fail_dispctor:
	nouveau_bios_takedown(dev);
fail_bios:
	nouveau_ttm_fini(drm);
fail_ttm:
	nouveau_vga_fini(drm);
	nouveau_cli_fini(&drm->client);
fail_master:
	nouveau_cli_fini(&drm->master);
fail_alloc:
	kfree(drm);
	return ret;
}
/* Tear down everything nouveau_drm_device_init() set up, in reverse
 * order.  The runtime-PM get/forbid pair keeps the device powered for
 * the whole teardown.
 */
static void
nouveau_drm_device_fini(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (nouveau_pmops_runtime()) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	nouveau_led_fini(dev);
	nouveau_fbcon_fini(dev);
	nouveau_accel_fini(drm);
	nouveau_hwmon_fini(dev);
	nouveau_debugfs_fini(drm);

	if (dev->mode_config.num_crtc)
		nouveau_display_fini(dev, false, false);
	nouveau_display_destroy(dev);

	nouveau_bios_takedown(dev);

	nouveau_ttm_fini(drm);
	nouveau_vga_fini(drm);

	nouveau_cli_fini(&drm->client);
	nouveau_cli_fini(&drm->master);
	kfree(drm);
}
/* PCI probe: verify the chipset is supported, kick conflicting firmware
 * framebuffers (vesafb/efifb) off the hardware, then create the real
 * nvkm device and register the DRM device.
 */
static int nouveau_drm_probe(struct pci_dev *pdev,
			     const struct pci_device_id *pent)
{
	struct nvkm_device *device;
	struct drm_device *drm_dev;
	struct apertures_struct *aper;
	bool boot = false;
	int ret;

	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;

	/* We need to check that the chipset is supported before booting
	 * fbdev off the hardware, as there's no way to put it back.
	 */
	ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
	if (ret)
		return ret;

	nvkm_device_del(&device);

	/* Remove conflicting drivers (vesafb, efifb etc). */
	aper = alloc_apertures(3);
	if (!aper)
		return -ENOMEM;

	/* BAR1 (VRAM) always; BAR2/BAR3 only when present. */
	aper->ranges[0].base = pci_resource_start(pdev, 1);
	aper->ranges[0].size = pci_resource_len(pdev, 1);
	aper->count = 1;

	if (pci_resource_len(pdev, 2)) {
		aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
		aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
		aper->count++;
	}

	if (pci_resource_len(pdev, 3)) {
		aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
		aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
		aper->count++;
	}

#ifdef CONFIG_X86
	/* Shadowed ROM means this was the primary (boot) display. */
	boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
	if (nouveau_modeset != 2)
		drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
	kfree(aper);

	ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
				  true, true, ~0ULL, &device);
	if (ret)
		return ret;

	pci_set_master(pdev);

	if (nouveau_atomic)
		driver_pci.driver_features |= DRIVER_ATOMIC;

	drm_dev = drm_dev_alloc(&driver_pci, &pdev->dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail_nvkm;
	}

	ret = pci_enable_device(pdev);
	if (ret)
		goto fail_drm;

	drm_dev->pdev = pdev;
	pci_set_drvdata(pdev, drm_dev);

	ret = nouveau_drm_device_init(drm_dev);
	if (ret)
		goto fail_pci;

	ret = drm_dev_register(drm_dev, pent->driver_data);
	if (ret)
		goto fail_drm_dev_init;

	return 0;

fail_drm_dev_init:
	nouveau_drm_device_fini(drm_dev);
fail_pci:
	pci_disable_device(pdev);
fail_drm:
	drm_dev_put(drm_dev);
fail_nvkm:
	nvkm_device_del(&device);
	return ret;
}
/* Unregister and destroy a DRM device and its backing nvkm device.
 *
 * The nvkm device handle is looked up through the client *before*
 * nouveau_drm_device_fini() frees the drm state, then deleted last.
 */
void
nouveau_drm_device_remove(struct drm_device *dev)
{
	struct pci_dev *pdev = dev->pdev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_client *client;
	struct nvkm_device *device;

	drm_dev_unregister(dev);

	dev->irq_enabled = false;
	client = nvxx_client(&drm->client.base);
	device = nvkm_device_find(client->device);

	nouveau_drm_device_fini(dev);
	pci_disable_device(pdev);
	drm_dev_put(dev);
	nvkm_device_del(&device);
}
  589. static void
  590. nouveau_drm_remove(struct pci_dev *pdev)
  591. {
  592. struct drm_device *dev = pci_get_drvdata(pdev);
  593. nouveau_drm_device_remove(dev);
  594. }
/* Common suspend path for both system sleep and runtime PM.
 * @runtime: true when called for runtime (dynamic) power management.
 *
 * Order matters: console and display first, then VRAM eviction, kernel
 * channel idling, fence state save, and finally the nvif object tree.
 * Error paths unwind whatever had been suspended so far.
 */
static int
nouveau_do_suspend(struct drm_device *dev, bool runtime)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int ret;

	nouveau_led_suspend(dev);

	if (dev->mode_config.num_crtc) {
		NV_DEBUG(drm, "suspending console...\n");
		nouveau_fbcon_set_suspend(dev, 1);
		NV_DEBUG(drm, "suspending display...\n");
		ret = nouveau_display_suspend(dev, runtime);
		if (ret)
			return ret;
	}

	NV_DEBUG(drm, "evicting buffers...\n");
	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);

	NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
	if (drm->cechan) {
		ret = nouveau_channel_idle(drm->cechan);
		if (ret)
			goto fail_display;
	}

	if (drm->channel) {
		ret = nouveau_channel_idle(drm->channel);
		if (ret)
			goto fail_display;
	}

	NV_DEBUG(drm, "suspending fence...\n");
	if (drm->fence && nouveau_fence(drm)->suspend) {
		if (!nouveau_fence(drm)->suspend(drm)) {
			ret = -ENOMEM;
			goto fail_display;
		}
	}

	NV_DEBUG(drm, "suspending object tree...\n");
	ret = nvif_client_suspend(&drm->master.base);
	if (ret)
		goto fail_client;

	return 0;

fail_client:
	if (drm->fence && nouveau_fence(drm)->resume)
		nouveau_fence(drm)->resume(drm);

fail_display:
	if (dev->mode_config.num_crtc) {
		NV_DEBUG(drm, "resuming display...\n");
		nouveau_display_resume(dev, runtime);
	}
	return ret;
}
/* Common resume path, mirroring nouveau_do_suspend() in reverse:
 * object tree, fence state, VBIOS re-init, display, console, LEDs.
 * Always returns 0.
 */
static int
nouveau_do_resume(struct drm_device *dev, bool runtime)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	NV_DEBUG(drm, "resuming object tree...\n");
	nvif_client_resume(&drm->master.base);

	NV_DEBUG(drm, "resuming fence...\n");
	if (drm->fence && nouveau_fence(drm)->resume)
		nouveau_fence(drm)->resume(drm);

	nouveau_run_vbios_init(dev);

	if (dev->mode_config.num_crtc) {
		NV_DEBUG(drm, "resuming display...\n");
		nouveau_display_resume(dev, runtime);
		NV_DEBUG(drm, "resuming console...\n");
		nouveau_fbcon_set_suspend(dev, 0);
	}

	nouveau_led_resume(dev);
	return 0;
}
/* System-sleep suspend callback.  Skips devices already powered off by
 * vga_switcheroo, then suspends the driver and puts the PCI device in
 * D3hot.  The short delay lets the power rail settle (historic w/a).
 */
int
nouveau_pmops_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
		return 0;

	ret = nouveau_do_suspend(drm_dev, false);
	if (ret)
		return ret;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	udelay(200);
	return 0;
}
/* System-sleep resume callback: restore PCI state, re-enable the
 * device, resume the driver, and rescan outputs for hotplug changes
 * that happened while asleep.
 */
int
nouveau_pmops_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	ret = nouveau_do_resume(drm_dev, false);

	/* Monitors may have been connected / disconnected during suspend */
	schedule_work(&nouveau_drm(drm_dev)->hpd_work);

	return ret;
}
  701. static int
  702. nouveau_pmops_freeze(struct device *dev)
  703. {
  704. struct pci_dev *pdev = to_pci_dev(dev);
  705. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  706. return nouveau_do_suspend(drm_dev, false);
  707. }
  708. static int
  709. nouveau_pmops_thaw(struct device *dev)
  710. {
  711. struct pci_dev *pdev = to_pci_dev(dev);
  712. struct drm_device *drm_dev = pci_get_drvdata(pdev);
  713. return nouveau_do_resume(drm_dev, false);
  714. }
  715. bool
  716. nouveau_pmops_runtime(void)
  717. {
  718. if (nouveau_runtime_pm == -1)
  719. return nouveau_is_optimus() || nouveau_is_v1_dsm();
  720. return nouveau_runtime_pm == 1;
  721. }
/* Runtime-PM suspend: power the dGPU down to D3cold on dual-GPU
 * systems.  pci_ignore_hotplug() stops the bridge from reporting the
 * disappearing device as an unplug event.
 */
static int
nouveau_pmops_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (!nouveau_pmops_runtime()) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	nouveau_switcheroo_optimus_dsm();
	ret = nouveau_do_suspend(drm_dev, true);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_ignore_hotplug(pdev);
	pci_set_power_state(pdev, PCI_D3cold);
	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
	return ret;
}
/* Runtime-PM resume: restore PCI state, resume the driver, re-enable
 * the secondary-bus reset register (the "do magic" write), and rescan
 * outputs for hotplug events missed while powered down.
 */
static int
nouveau_pmops_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
	int ret;

	if (!nouveau_pmops_runtime()) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	ret = nouveau_do_resume(drm_dev, true);

	/* do magic */
	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;

	/* Monitors may have been connected / disconnected during suspend */
	schedule_work(&nouveau_drm(drm_dev)->hpd_work);

	return ret;
}
/* Runtime-PM idle callback: request delayed autosuspend instead of an
 * immediate suspend, and return 1 so the PM core does not also call
 * the suspend path itself.
 */
static int
nouveau_pmops_runtime_idle(struct device *dev)
{
	if (!nouveau_pmops_runtime()) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);
	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
	return 1;
}
  778. static int
  779. nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
  780. {
  781. struct nouveau_drm *drm = nouveau_drm(dev);
  782. struct nouveau_cli *cli;
  783. char name[32], tmpname[TASK_COMM_LEN];
  784. int ret;
  785. /* need to bring up power immediately if opening device */
  786. ret = pm_runtime_get_sync(dev->dev);
  787. if (ret < 0 && ret != -EACCES)
  788. return ret;
  789. get_task_comm(tmpname, current);
  790. snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
  791. if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
  792. ret = -ENOMEM;
  793. goto done;
  794. }
  795. ret = nouveau_cli_init(drm, name, cli);
  796. if (ret)
  797. goto done;
  798. cli->base.super = false;
  799. fpriv->driver_priv = cli;
  800. mutex_lock(&drm->client.mutex);
  801. list_add(&cli->head, &drm->clients);
  802. mutex_unlock(&drm->client.mutex);
  803. done:
  804. if (ret && cli) {
  805. nouveau_cli_fini(cli);
  806. kfree(cli);
  807. }
  808. pm_runtime_mark_last_busy(dev->dev);
  809. pm_runtime_put_autosuspend(dev->dev);
  810. return ret;
  811. }
/* DRM postclose callback: tear down the per-fd client created in
 * nouveau_drm_open(), keeping the device powered for the duration.
 */
static void
nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
{
	struct nouveau_cli *cli = nouveau_cli(fpriv);
	struct nouveau_drm *drm = nouveau_drm(dev);

	pm_runtime_get_sync(dev->dev);

	mutex_lock(&cli->mutex);
	if (cli->abi16)
		nouveau_abi16_fini(cli->abi16);
	mutex_unlock(&cli->mutex);

	mutex_lock(&drm->client.mutex);
	list_del(&cli->head);
	mutex_unlock(&drm->client.mutex);

	nouveau_cli_fini(cli);
	kfree(cli);
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
/* Driver-private ioctl table.  Most entries allow render nodes;
 * SETPARAM is restricted to the authenticated master running as root.
 */
static const struct drm_ioctl_desc
nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
};
  845. long
  846. nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  847. {
  848. struct drm_file *filp = file->private_data;
  849. struct drm_device *dev = filp->minor->dev;
  850. long ret;
  851. ret = pm_runtime_get_sync(dev->dev);
  852. if (ret < 0 && ret != -EACCES)
  853. return ret;
  854. switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
  855. case DRM_NOUVEAU_NVIF:
  856. ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
  857. break;
  858. default:
  859. ret = drm_ioctl(file, cmd, arg);
  860. break;
  861. }
  862. pm_runtime_mark_last_busy(dev->dev);
  863. pm_runtime_put_autosuspend(dev->dev);
  864. return ret;
  865. }
/* file_operations for /dev/dri/cardN and render nodes: mostly generic DRM
 * helpers, with nouveau-specific ioctl and mmap entry points so the device
 * can be runtime-resumed (ioctl) and TTM-backed (mmap).
 */
static const struct file_operations
nouveau_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = nouveau_drm_ioctl,
	.mmap = nouveau_ttm_mmap,
	.poll = drm_poll,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	/* 32-bit userspace on a 64-bit kernel */
	.compat_ioctl = nouveau_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
/* Template drm_driver shared by the PCI and platform (Tegra) paths; copied
 * into driver_pci/driver_platform at module init (see nouveau_drm_init()).
 */
static struct drm_driver
driver_stub = {
	.driver_features =
		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
		DRIVER_KMS_LEGACY_CONTEXT,

	/* per-fd lifecycle */
	.open = nouveau_drm_open,
	.postclose = nouveau_drm_postclose,
	.lastclose = nouveau_vga_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = nouveau_drm_debugfs_init,
#endif

	/* vblank handling / scanout position for timestamping */
	.enable_vblank = nouveau_display_vblank_enable,
	.disable_vblank = nouveau_display_vblank_disable,
	.get_scanout_position = nouveau_display_scanoutpos,
	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,

	.ioctls = nouveau_ioctls,
	.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
	.fops = &nouveau_driver_fops,

	/* PRIME (dma-buf) import/export hooks */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = nouveau_gem_prime_pin,
	.gem_prime_res_obj = nouveau_gem_prime_res_obj,
	.gem_prime_unpin = nouveau_gem_prime_unpin,
	.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
	.gem_prime_vmap = nouveau_gem_prime_vmap,
	.gem_prime_vunmap = nouveau_gem_prime_vunmap,

	/* GEM object lifecycle */
	.gem_free_object_unlocked = nouveau_gem_object_del,
	.gem_open_object = nouveau_gem_object_open,
	.gem_close_object = nouveau_gem_object_close,

	/* dumb buffers for unaccelerated KMS clients */
	.dumb_create = nouveau_display_dumb_create,
	.dumb_map_offset = nouveau_display_dumb_map_offset,

	/* driver identity reported to userspace */
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
#ifdef GIT_REVISION
	.date = GIT_REVISION,
#else
	.date = DRIVER_DATE,
#endif
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
/* Bind to any NVIDIA (or SGS-branded) device in the PCI display base class;
 * the class mask ignores the sub-class so VGA, 3D and "other display"
 * controllers all match.
 */
static struct pci_device_id
nouveau_drm_pci_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask = 0xff << 16,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask = 0xff << 16,
	},
	{}	/* sentinel */
};
/* Dump the current module parameter values to the kernel log at driver
 * debug level; purely informational, called once from nouveau_drm_init().
 */
static void nouveau_display_options(void)
{
	DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");
	DRM_DEBUG_DRIVER("... tv_disable   : %d\n", nouveau_tv_disable);
	DRM_DEBUG_DRIVER("... ignorelid    : %d\n", nouveau_ignorelid);
	DRM_DEBUG_DRIVER("... duallink     : %d\n", nouveau_duallink);
	DRM_DEBUG_DRIVER("... nofbaccel    : %d\n", nouveau_nofbaccel);
	DRM_DEBUG_DRIVER("... config       : %s\n", nouveau_config);
	DRM_DEBUG_DRIVER("... debug        : %s\n", nouveau_debug);
	DRM_DEBUG_DRIVER("... noaccel      : %d\n", nouveau_noaccel);
	DRM_DEBUG_DRIVER("... modeset      : %d\n", nouveau_modeset);
	DRM_DEBUG_DRIVER("... runpm        : %d\n", nouveau_runtime_pm);
	DRM_DEBUG_DRIVER("... vram_pushbuf : %d\n", nouveau_vram_pushbuf);
	DRM_DEBUG_DRIVER("... hdmimhz      : %d\n", nouveau_hdmimhz);
}
/* System and runtime power-management callbacks.  Hibernation reuses the
 * suspend-to-RAM style handlers: poweroff shares the freeze path and
 * restore shares the resume path.
 */
static const struct dev_pm_ops nouveau_pm_ops = {
	.suspend = nouveau_pmops_suspend,
	.resume = nouveau_pmops_resume,
	.freeze = nouveau_pmops_freeze,
	.thaw = nouveau_pmops_thaw,
	.poweroff = nouveau_pmops_freeze,
	.restore = nouveau_pmops_resume,
	/* runtime PM drives Optimus-style dynamic power-off */
	.runtime_suspend = nouveau_pmops_runtime_suspend,
	.runtime_resume = nouveau_pmops_runtime_resume,
	.runtime_idle = nouveau_pmops_runtime_idle,
};
/* PCI glue: registered from nouveau_drm_init() when CONFIG_PCI is set. */
static struct pci_driver
nouveau_drm_pci_driver = {
	.name = "nouveau",
	.id_table = nouveau_drm_pci_table,
	.probe = nouveau_drm_probe,
	.remove = nouveau_drm_remove,
	.driver.pm = &nouveau_pm_ops,
};
  973. struct drm_device *
  974. nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
  975. struct platform_device *pdev,
  976. struct nvkm_device **pdevice)
  977. {
  978. struct drm_device *drm;
  979. int err;
  980. err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug,
  981. true, true, ~0ULL, pdevice);
  982. if (err)
  983. goto err_free;
  984. drm = drm_dev_alloc(&driver_platform, &pdev->dev);
  985. if (IS_ERR(drm)) {
  986. err = PTR_ERR(drm);
  987. goto err_free;
  988. }
  989. err = nouveau_drm_device_init(drm);
  990. if (err)
  991. goto err_put;
  992. platform_set_drvdata(pdev, drm);
  993. return drm;
  994. err_put:
  995. drm_dev_put(drm);
  996. err_free:
  997. nvkm_device_del(pdevice);
  998. return ERR_PTR(err);
  999. }
  1000. static int __init
  1001. nouveau_drm_init(void)
  1002. {
  1003. driver_pci = driver_stub;
  1004. driver_platform = driver_stub;
  1005. nouveau_display_options();
  1006. if (nouveau_modeset == -1) {
  1007. if (vgacon_text_force())
  1008. nouveau_modeset = 0;
  1009. }
  1010. if (!nouveau_modeset)
  1011. return 0;
  1012. #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
  1013. platform_driver_register(&nouveau_platform_driver);
  1014. #endif
  1015. nouveau_register_dsm_handler();
  1016. nouveau_backlight_ctor();
  1017. #ifdef CONFIG_PCI
  1018. return pci_register_driver(&nouveau_drm_pci_driver);
  1019. #else
  1020. return 0;
  1021. #endif
  1022. }
/**
 * nouveau_drm_exit - module exit point
 *
 * Unwinds nouveau_drm_init() in strictly reverse order.  When modesetting
 * was disabled, init registered nothing, so nothing is torn down.
 */
static void __exit
nouveau_drm_exit(void)
{
	if (!nouveau_modeset)
		return;

#ifdef CONFIG_PCI
	pci_unregister_driver(&nouveau_drm_pci_driver);
#endif
	nouveau_backlight_dtor();
	nouveau_unregister_dsm_handler();
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
	platform_driver_unregister(&nouveau_platform_driver);
#endif
}
module_init(nouveau_drm_init);
module_exit(nouveau_drm_exit);

/* exported so udev/modprobe can autoload the module for matching PCI IDs */
MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");