drm_stub.c

/**
 * \file drm_stub.c
 * Stub support
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 */

/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>

unsigned int drm_debug = 0;	/* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

unsigned int drm_rnodes = 0;	/* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);

unsigned int drm_vblank_offdelay = 5000;	/* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);

unsigned int drm_timestamp_precision = 20;	/* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);
module_param_named(rnodes, drm_rnodes, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
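
/*
 * Usage sketch: because the parameters above are registered with 0600
 * permissions, root can change them at runtime through sysfs or set them
 * at module load time, e.g.:
 *
 *	echo 1 > /sys/module/drm/parameters/debug
 *	modprobe drm debug=1
 */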

static DEFINE_SPINLOCK(drm_minor_lock);
struct idr drm_minors_idr;

struct class *drm_class;
struct dentry *drm_debugfs_root;

int drm_err(const char *func, const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(drm_err);

void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_ut_debug_printk);
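
/*
 * Usage sketch: drivers normally reach these helpers through the DRM_ERROR()
 * and DRM_DEBUG() macros from drmP.h rather than calling them directly, e.g.:
 *
 *	if (ret)
 *		DRM_ERROR("failed to enable vblank: %d\n", ret);
 *	DRM_DEBUG("minor %d opened\n", minor->index);
 */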

struct drm_master *drm_master_create(struct drm_minor *minor)
{
	struct drm_master *master;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	kref_init(&master->refcount);
	spin_lock_init(&master->lock.spinlock);
	init_waitqueue_head(&master->lock.lock_queue);
	drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
	INIT_LIST_HEAD(&master->magicfree);
	master->minor = minor;

	return master;
}

struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);

static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_magic_entry *pt, *next;
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	mutex_lock(&dev->struct_mutex);
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}

	if (master->unique) {
		kfree(master->unique);
		master->unique = NULL;
		master->unique_len = 0;
	}

	kfree(dev->devname);
	dev->devname = NULL;

	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
		list_del(&pt->head);
		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
		kfree(pt);
	}

	drm_ht_remove(&master->magiclist);

	mutex_unlock(&dev->struct_mutex);
	kfree(master);
}

void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);

int drm_setmaster_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	int ret = 0;

	mutex_lock(&dev->master_mutex);
	if (file_priv->is_master)
		goto out_unlock;

	if (file_priv->minor->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!file_priv->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	file_priv->minor->master = drm_master_get(file_priv->master);
	file_priv->is_master = 1;
	if (dev->driver->master_set) {
		ret = dev->driver->master_set(dev, file_priv, false);
		if (unlikely(ret != 0)) {
			file_priv->is_master = 0;
			drm_master_put(&file_priv->minor->master);
		}
	}

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	int ret = -EINVAL;

	mutex_lock(&dev->master_mutex);
	if (!file_priv->is_master)
		goto out_unlock;

	if (!file_priv->minor->master)
		goto out_unlock;

	ret = 0;
	if (dev->driver->master_drop)
		dev->driver->master_drop(dev, file_priv, false);
	drm_master_put(&file_priv->minor->master);
	file_priv->is_master = 0;

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means, DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */
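
/*
 * Usage sketch: a driver that needs a device's primary (legacy) minor would
 * typically reach it through the corresponding pointer on the device, e.g.:
 *
 *	struct drm_minor *minor = dev->primary;
 *
 *	if (minor)
 *		DRM_DEBUG("primary minor registered as %d\n", minor->index);
 */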

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_LEGACY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_CONTROL:
		return &dev->control;
	default:
		return NULL;
	}
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot;

	slot = drm_minor_get_slot(dev, type);
	if (*slot) {
		kfree(*slot);
		*slot = NULL;
	}
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *new_minor;
	unsigned long flags;
	int ret;
	int minor_id;

	DRM_DEBUG("\n");

	new_minor = *drm_minor_get_slot(dev, type);
	if (!new_minor)
		return 0;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	minor_id = idr_alloc(&drm_minors_idr,
			     NULL,
			     64 * type,
			     64 * (type + 1),
			     GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (minor_id < 0)
		return minor_id;

	new_minor->index = minor_id;

	ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_id;
	}

	ret = drm_sysfs_device_add(new_minor);
	if (ret) {
		DRM_ERROR("DRM: Error sysfs_device_add.\n");
		goto err_debugfs;
	}

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, new_minor, new_minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor assigned %d\n", minor_id);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(new_minor);
err_id:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor_id);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	new_minor->index = 0;
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !minor->kdev)
		return;

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	minor->index = 0;

	drm_debugfs_cleanup(minor);
	drm_sysfs_device_remove(minor);
}

/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_unref(minor->dev);
}
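
/*
 * Usage sketch: callers pair drm_minor_acquire() with drm_minor_release(),
 * e.g. when resolving a character-device minor number in an open path:
 *
 *	struct drm_minor *minor;
 *
 *	minor = drm_minor_acquire(iminor(inode));
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *
 *	... use minor and minor->dev ...
 *
 *	drm_minor_release(minor);
 */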

/**
 * Called via drm_exit() at module unload time or when the PCI device is
 * unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname = simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs = simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}
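
/*
 * Usage sketch: as described above, each drm_fs_inode_new() is paired with
 * exactly one drm_fs_inode_free(), e.g. around the lifetime of a device:
 *
 *	inode = drm_fs_inode_new();
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 *	... use inode->i_mapping as the device's address_space ...
 *
 *	drm_fs_inode_free(inode);
 */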

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * RETURNS:
 * Pointer to new DRM device, or NULL if out of memory.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->count_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (drm_ht_create(&dev->map_hash, 12))
		goto err_minors;

	ret = drm_ctxbitmap_init(dev);
	if (ret) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto err_ht;
	}

	if (driver->driver_features & DRIVER_GEM) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

err_ctxbitmap:
	drm_ctxbitmap_cleanup(dev);
err_ht:
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	drm_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	kfree(dev->devname);

	mutex_destroy(&dev->master_mutex);
	kfree(dev);
}

/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's ->load() callback
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
 * previously.
 *
 * Never call this twice on any device!
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	/* setup grouping for legacy outputs */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
						       &dev->primary->mode_group);
		if (ret)
			goto err_unload;
	}

	ret = 0;
	goto out_unlock;

err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
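
/*
 * Usage sketch (my_driver, pdev and ent are placeholders for a hypothetical
 * PCI driver): a probe routine typically allocates, registers and, on
 * failure, drops the device again:
 *
 *	struct drm_device *dev;
 *	int ret;
 *
 *	dev = drm_dev_alloc(&my_driver, &pdev->dev);
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	ret = drm_dev_register(dev, ent->driver_data);
 *	if (ret) {
 *		drm_dev_unref(dev);
 *		return ret;
 *	}
 */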

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_vblank_cleanup(dev);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);