/* drm_stub.c */
  1. /**
  2. * \file drm_stub.h
  3. * Stub support
  4. *
  5. * \author Rickard E. (Rik) Faith <faith@valinux.com>
  6. */
  7. /*
  8. * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
  9. *
  10. * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
  11. * All Rights Reserved.
  12. *
  13. * Permission is hereby granted, free of charge, to any person obtaining a
  14. * copy of this software and associated documentation files (the "Software"),
  15. * to deal in the Software without restriction, including without limitation
  16. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  17. * and/or sell copies of the Software, and to permit persons to whom the
  18. * Software is furnished to do so, subject to the following conditions:
  19. *
  20. * The above copyright notice and this permission notice (including the next
  21. * paragraph) shall be included in all copies or substantial portions of the
  22. * Software.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  25. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  26. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  27. * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  28. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  29. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  30. * DEALINGS IN THE SOFTWARE.
  31. */
  32. #include <linux/fs.h>
  33. #include <linux/module.h>
  34. #include <linux/moduleparam.h>
  35. #include <linux/mount.h>
  36. #include <linux/slab.h>
  37. #include <drm/drmP.h>
  38. #include <drm/drm_core.h>
/*
 * Module-wide tunables.  Each is exported for other DRM core/driver code and
 * (except drm_timestamp_monotonic's EXPORT) wired to a module parameter via
 * the module_param_named() calls below.
 */
unsigned int drm_debug = 0;	/* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

unsigned int drm_rnodes = 0;	/* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);

/* Delay in msecs before an unused vblank interrupt is disabled again. */
unsigned int drm_vblank_offdelay = 5000;	/* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);

/* Maximum permitted vblank timestamp error, in usecs. */
unsigned int drm_timestamp_precision = 20;	/* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

/*
 * NOTE(review): the backing variables are declared `unsigned int` but the
 * parameters are registered as `int` — works in practice for these small
 * values, but confirm this is intentional.
 */
module_param_named(debug, drm_debug, int, 0600);
module_param_named(rnodes, drm_rnodes, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);

/* Maps allocated minor numbers to their struct drm_minor (slot holds NULL
 * while an id is reserved but the minor not yet published). */
struct idr drm_minors_idr;

/* Device class and debugfs root; presumably created during core init in
 * another file — confirm against drm_core_init(). */
struct class *drm_class;
struct dentry *drm_debugfs_root;
  68. int drm_err(const char *func, const char *format, ...)
  69. {
  70. struct va_format vaf;
  71. va_list args;
  72. int r;
  73. va_start(args, format);
  74. vaf.fmt = format;
  75. vaf.va = &args;
  76. r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
  77. va_end(args);
  78. return r;
  79. }
  80. EXPORT_SYMBOL(drm_err);
  81. void drm_ut_debug_printk(unsigned int request_level,
  82. const char *prefix,
  83. const char *function_name,
  84. const char *format, ...)
  85. {
  86. struct va_format vaf;
  87. va_list args;
  88. if (drm_debug & request_level) {
  89. va_start(args, format);
  90. vaf.fmt = format;
  91. vaf.va = &args;
  92. if (function_name)
  93. printk(KERN_DEBUG "[%s:%s], %pV", prefix,
  94. function_name, &vaf);
  95. else
  96. printk(KERN_DEBUG "%pV", &vaf);
  97. va_end(args);
  98. }
  99. }
  100. EXPORT_SYMBOL(drm_ut_debug_printk);
  101. static int drm_minor_get_id(struct drm_device *dev, int type)
  102. {
  103. int ret;
  104. int base = 0, limit = 63;
  105. if (type == DRM_MINOR_CONTROL) {
  106. base += 64;
  107. limit = base + 63;
  108. } else if (type == DRM_MINOR_RENDER) {
  109. base += 128;
  110. limit = base + 63;
  111. }
  112. mutex_lock(&dev->struct_mutex);
  113. ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
  114. mutex_unlock(&dev->struct_mutex);
  115. return ret == -ENOSPC ? -EINVAL : ret;
  116. }
  117. struct drm_master *drm_master_create(struct drm_minor *minor)
  118. {
  119. struct drm_master *master;
  120. master = kzalloc(sizeof(*master), GFP_KERNEL);
  121. if (!master)
  122. return NULL;
  123. kref_init(&master->refcount);
  124. spin_lock_init(&master->lock.spinlock);
  125. init_waitqueue_head(&master->lock.lock_queue);
  126. drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
  127. INIT_LIST_HEAD(&master->magicfree);
  128. master->minor = minor;
  129. list_add_tail(&master->head, &minor->master_list);
  130. return master;
  131. }
/* Take an additional reference on @master; returns @master so the call can
 * be chained at assignment sites. */
struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);
/*
 * Final kref release callback for a drm_master: tears down everything the
 * master owned, then frees it.  Invoked via drm_master_put() when the last
 * reference is dropped.
 *
 * NOTE(review): drm_rmmap_locked() is the "caller holds the lock" variant,
 * which suggests dev->struct_mutex is expected to be held here — confirm
 * against the drm_master_put() call sites.
 */
static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_magic_entry *pt, *next;
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	/* Unlink from the minor before running driver teardown. */
	list_del(&master->head);

	/* Let the driver release its per-master state first. */
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	/* Remove every mapping that was created on behalf of this master. */
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_rmmap_locked(dev, r_list->map);
			/* Dead store: the _safe iterator reassigns r_list on
			 * the next iteration anyway. */
			r_list = NULL;
		}
	}

	if (master->unique) {
		kfree(master->unique);
		master->unique = NULL;
		master->unique_len = 0;
	}

	kfree(dev->devname);
	dev->devname = NULL;

	/* Drop all outstanding authentication magic entries. */
	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
		list_del(&pt->head);
		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
		kfree(pt);
	}

	drm_ht_remove(&master->magiclist);

	kfree(master);
}
/* Drop one reference on *@master (freeing it via drm_master_destroy() when
 * it was the last one) and NULL the caller's pointer to prevent reuse. */
void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
  174. int drm_setmaster_ioctl(struct drm_device *dev, void *data,
  175. struct drm_file *file_priv)
  176. {
  177. int ret = 0;
  178. if (file_priv->is_master)
  179. return 0;
  180. if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
  181. return -EINVAL;
  182. if (!file_priv->master)
  183. return -EINVAL;
  184. if (file_priv->minor->master)
  185. return -EINVAL;
  186. mutex_lock(&dev->struct_mutex);
  187. file_priv->minor->master = drm_master_get(file_priv->master);
  188. file_priv->is_master = 1;
  189. if (dev->driver->master_set) {
  190. ret = dev->driver->master_set(dev, file_priv, false);
  191. if (unlikely(ret != 0)) {
  192. file_priv->is_master = 0;
  193. drm_master_put(&file_priv->minor->master);
  194. }
  195. }
  196. mutex_unlock(&dev->struct_mutex);
  197. return ret;
  198. }
  199. int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
  200. struct drm_file *file_priv)
  201. {
  202. if (!file_priv->is_master)
  203. return -EINVAL;
  204. if (!file_priv->minor->master)
  205. return -EINVAL;
  206. mutex_lock(&dev->struct_mutex);
  207. if (dev->driver->master_drop)
  208. dev->driver->master_drop(dev, file_priv, false);
  209. drm_master_put(&file_priv->minor->master);
  210. file_priv->is_master = 0;
  211. mutex_unlock(&dev->struct_mutex);
  212. return 0;
  213. }
/**
 * drm_get_minor - Allocate and register new DRM minor
 * @dev: DRM device
 * @minor: Pointer to where new minor is stored
 * @type: Type of minor
 *
 * Allocate a new minor of the given type and register it. A pointer to the new
 * minor is returned in @minor.
 * Caller must hold the global DRM mutex.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
			 int type)
{
	struct drm_minor *new_minor;
	int ret;
	int minor_id;

	DRM_DEBUG("\n");

	/* Reserve a minor id first; the idr slot stays NULL until the minor
	 * is fully initialized and published via idr_replace() below. */
	minor_id = drm_minor_get_id(dev, type);
	if (minor_id < 0)
		return minor_id;

	new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
	if (!new_minor) {
		ret = -ENOMEM;
		goto err_idr;
	}

	new_minor->type = type;
	new_minor->device = MKDEV(DRM_MAJOR, minor_id);
	new_minor->dev = dev;
	new_minor->index = minor_id;
	INIT_LIST_HEAD(&new_minor->master_list);

	/* Publish the initialized minor in the reserved idr slot. */
	idr_replace(&drm_minors_idr, new_minor, minor_id);

#if defined(CONFIG_DEBUG_FS)
	ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_mem;
	}
#endif

	ret = drm_sysfs_device_add(new_minor);
	if (ret) {
		printk(KERN_ERR
		       "DRM: Error sysfs_device_add.\n");
		goto err_debugfs;
	}
	*minor = new_minor;

	DRM_DEBUG("new minor assigned %d\n", minor_id);
	return 0;

	/* Error unwinding, in reverse order of setup. */
err_debugfs:
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_cleanup(new_minor);
err_mem:
#endif
	kfree(new_minor);
err_idr:
	idr_remove(&drm_minors_idr, minor_id);
	*minor = NULL;
	return ret;
}
/**
 * drm_unplug_minor - Unplug DRM minor
 * @minor: Minor to unplug
 *
 * Unplugs the given DRM minor but keeps the object. So after this returns,
 * minor->dev is still valid so existing open-files can still access it to get
 * device information from their drm_file objects.
 * If the minor is already unplugged or if @minor is NULL, nothing is done.
 * The global DRM mutex must be held by the caller.
 */
static void drm_unplug_minor(struct drm_minor *minor)
{
	/* kdev is cleared once unplugged, making this call idempotent. */
	if (!minor || !minor->kdev)
		return;

#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_cleanup(minor);
#endif

	/* Remove from sysfs and release the minor number for reuse. */
	drm_sysfs_device_remove(minor);
	idr_remove(&drm_minors_idr, minor->index);
}
/**
 * drm_put_minor - Destroy DRM minor
 * @minor: Minor to destroy
 *
 * This calls drm_unplug_minor() on the given minor and then frees it. Nothing
 * is done if @minor is NULL. It is fine to call this on already unplugged
 * minors.
 * The global DRM mutex must be held by the caller.
 */
static void drm_put_minor(struct drm_minor *minor)
{
	if (!minor)
		return;

	DRM_DEBUG("release secondary minor %d\n", minor->index);

	drm_unplug_minor(minor);
	kfree(minor);
}
/**
 * drm_put_dev - Unregister and free a DRM device
 * @dev: DRM device (warns and returns if NULL)
 *
 * Called via drm_exit() at module unload time or when pci device is
 * unplugged.
 *
 * Cleans up all DRM device, calling drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_free(dev);
}
EXPORT_SYMBOL(drm_put_dev);
/*
 * drm_unplug_dev - handle hot-unplug of a device (e.g. a yanked USB device).
 *
 * Unplugs all minors, marks the device unplugged, and destroys it right away
 * when nobody holds it open.  Otherwise destruction is deferred; presumably
 * the final release of the last open file performs drm_put_dev() — confirm
 * against the file-release path.
 *
 * NOTE(review): the minors are unplugged before drm_global_mutex is taken;
 * confirm racing opens are excluded by other means.
 */
void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_unplug_minor(dev->control);
	if (dev->render)
		drm_unplug_minor(dev->render);
	drm_unplug_minor(dev->primary);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);
  346. /*
  347. * DRM internal mount
  348. * We want to be able to allocate our own "struct address_space" to control
  349. * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
  350. * stand-alone address_space objects, so we need an underlying inode. As there
  351. * is no way to allocate an independent inode easily, we need a fake internal
  352. * VFS mount-point.
  353. *
  354. * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
  355. * frees it again. You are allowed to use iget() and iput() to get references to
  356. * the inode. But each drm_fs_inode_new() call must be paired with exactly one
  357. * drm_fs_inode_free() call (which does not have to be the last iput()).
  358. * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
  359. * between multiple inode-users. You could, technically, call
  360. * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
  361. * iput(), but this way you'd end up with a new vfsmount for each inode.
  362. */
/* Pin count and mount of the shared internal "drm" pseudo-filesystem,
 * managed via simple_pin_fs()/simple_release_fs() in drm_fs_inode_*(). */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname = simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs = simple_statfs,
};
/* .mount callback for drm_fs_type: creates the anonymous "drm:" pseudo
 * superblock.  0x010203ff is the superblock magic — presumably an ad-hoc
 * value; confirm it does not collide with <uapi/linux/magic.h>. */
static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}
/* Internal, never user-mounted filesystem type backing drm_fs_mnt. */
static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};
/*
 * Allocate a fresh anonymous inode on the internal drm pseudo-fs, pinning
 * the mount for the inode's lifetime.  Must be balanced by exactly one
 * drm_fs_inode_free() call (see the block comment above).
 *
 * Returns the inode or an ERR_PTR(); on inode allocation failure the fs pin
 * taken here is released again.
 */
static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}
/* Counterpart to drm_fs_inode_new(): drops our inode reference and releases
 * the pseudo-fs pin taken at allocation time.  NULL is a no-op. */
static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}
/**
 * drm_dev_alloc - Allocate new drm device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems.
 *
 * RETURNS:
 * Pointer to new DRM device, or NULL on failure (out of memory, or a failed
 * map-hash / context-bitmap / GEM initialization).
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->count_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);

	if (drm_ht_create(&dev->map_hash, 12))
		goto err_free;

	ret = drm_ctxbitmap_init(dev);
	if (ret) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto err_ht;
	}

	/* GEM is optional; only drivers that advertise it get initialized. */
	if (driver->driver_features & DRIVER_GEM) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

	/* Unwind partial initialization in reverse order of setup. */
err_ctxbitmap:
	drm_ctxbitmap_cleanup(dev);
err_ht:
	drm_ht_remove(&dev->map_hash);
err_free:
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);
/**
 * drm_dev_free - Free DRM device
 * @dev: DRM device to free
 *
 * Free a DRM device that has previously been allocated via drm_dev_alloc().
 * You must not use kfree() instead or you will leak memory.
 *
 * This must not be called once the device got registered. Use drm_put_dev()
 * instead, which then calls drm_dev_free().
 */
void drm_dev_free(struct drm_device *dev)
{
	/* Release the minor objects (no-ops for minors never allocated). */
	drm_put_minor(dev->control);
	drm_put_minor(dev->render);
	drm_put_minor(dev->primary);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	drm_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);

	kfree(dev->devname);
	kfree(dev);
}
EXPORT_SYMBOL(drm_dev_free);
/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Driver-specific flags, passed through to the driver's load() hook
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
 * previously.
 *
 * Never call this twice on any device!
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	/* Control minor only exists for modesetting drivers. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
		if (ret)
			goto out_unlock;
	}

	/* Render minor is gated on both driver support and the drm_rnodes
	 * module parameter. */
	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
		if (ret)
			goto err_control_node;
	}

	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
	if (ret)
		goto err_render_node;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_primary_node;
	}

	/* setup grouping for legacy outputs */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
				&dev->primary->mode_group);
		if (ret)
			goto err_unload;
	}

	ret = 0;
	goto out_unlock;

	/* Error unwinding: each label undoes the step above it; minors that
	 * were never created are handled by drm_unplug_minor()'s NULL check. */
err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_primary_node:
	drm_unplug_minor(dev->primary);
err_render_node:
	drm_unplug_minor(dev->render);
err_control_node:
	drm_unplug_minor(dev->control);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_free() to free all resources.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_vblank_cleanup(dev);

	/* Tear down any remaining legacy mappings. */
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_rmmap(dev, r_list->map);

	/* Finally remove the minors from sysfs/debugfs and the minor idr;
	 * the objects themselves are freed later by drm_dev_free(). */
	drm_unplug_minor(dev->control);
	drm_unplug_minor(dev->render);
	drm_unplug_minor(dev->primary);
}
EXPORT_SYMBOL(drm_dev_unregister);