drm_stub.c
/**
 * \file drm_stub.c
 * Stub support
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 */
/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
unsigned int drm_debug = 0;  /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

unsigned int drm_rnodes = 0;  /* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);

/* 1 to allow user space to request universal planes (experimental) */
unsigned int drm_universal_planes = 0;
EXPORT_SYMBOL(drm_universal_planes);

unsigned int drm_vblank_offdelay = 5000;  /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);

unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);
module_param_named(rnodes, drm_rnodes, int, 0600);
module_param_named(universal_planes, drm_universal_planes, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
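/*
 * The module_param_named() calls above register these knobs with mode 0600,
 * so they can be set at module load time or toggled at runtime by root.
 * A hedged usage sketch (the sysfs paths are assumed from standard
 * moduleparam behaviour, not defined in this file):
 *
 *	# enable debug output at load time
 *	modprobe drm debug=1
 *	# or toggle it on a running system
 *	echo 1 > /sys/module/drm/parameters/debug
 */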
static DEFINE_SPINLOCK(drm_minor_lock);
struct idr drm_minors_idr;

struct class *drm_class;
struct dentry *drm_debugfs_root;

int drm_err(const char *func, const char *format, ...)
{
        struct va_format vaf;
        va_list args;
        int r;

        va_start(args, format);

        vaf.fmt = format;
        vaf.va = &args;

        r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);

        va_end(args);

        return r;
}
EXPORT_SYMBOL(drm_err);

void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;

        printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

        va_end(args);
}
EXPORT_SYMBOL(drm_ut_debug_printk);
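/*
 * Note: callers are not expected to use drm_err() or drm_ut_debug_printk()
 * directly; these are assumed to be the backends of the DRM_ERROR() and
 * DRM_DEBUG*() macros in drmP.h, which pass the calling function's name as
 * the first argument. A hedged sketch of typical driver-side usage:
 *
 *	DRM_DEBUG("vblank offdelay %u ms\n", drm_vblank_offdelay);
 *	DRM_ERROR("failed to map registers\n");
 */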
struct drm_master *drm_master_create(struct drm_minor *minor)
{
        struct drm_master *master;

        master = kzalloc(sizeof(*master), GFP_KERNEL);
        if (!master)
                return NULL;

        kref_init(&master->refcount);
        spin_lock_init(&master->lock.spinlock);
        init_waitqueue_head(&master->lock.lock_queue);
        if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
                kfree(master);
                return NULL;
        }
        INIT_LIST_HEAD(&master->magicfree);
        master->minor = minor;

        return master;
}

struct drm_master *drm_master_get(struct drm_master *master)
{
        kref_get(&master->refcount);
        return master;
}
EXPORT_SYMBOL(drm_master_get);

static void drm_master_destroy(struct kref *kref)
{
        struct drm_master *master = container_of(kref, struct drm_master, refcount);
        struct drm_magic_entry *pt, *next;
        struct drm_device *dev = master->minor->dev;
        struct drm_map_list *r_list, *list_temp;

        mutex_lock(&dev->struct_mutex);
        if (dev->driver->master_destroy)
                dev->driver->master_destroy(dev, master);

        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
                if (r_list->master == master) {
                        drm_rmmap_locked(dev, r_list->map);
                        r_list = NULL;
                }
        }

        if (master->unique) {
                kfree(master->unique);
                master->unique = NULL;
                master->unique_len = 0;
        }

        kfree(dev->devname);
        dev->devname = NULL;

        list_for_each_entry_safe(pt, next, &master->magicfree, head) {
                list_del(&pt->head);
                drm_ht_remove_item(&master->magiclist, &pt->hash_item);
                kfree(pt);
        }

        drm_ht_remove(&master->magiclist);

        mutex_unlock(&dev->struct_mutex);
        kfree(master);
}

void drm_master_put(struct drm_master **master)
{
        kref_put(&(*master)->refcount, drm_master_destroy);
        *master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
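/*
 * A note on the master ref-counting pattern above: drm_master_create()
 * returns an object holding one reference, drm_master_get() takes an extra
 * reference, and drm_master_put() drops one reference and NULLs the caller's
 * pointer; drm_master_destroy() runs once the count reaches zero. A hedged
 * sketch of the pattern as used by the ioctls below:
 *
 *	minor->master = drm_master_get(file_priv->master);
 *	...
 *	drm_master_put(&minor->master);   (minor->master is NULL afterwards)
 */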
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        int ret = 0;

        mutex_lock(&dev->master_mutex);
        if (file_priv->is_master)
                goto out_unlock;

        if (file_priv->minor->master) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (!file_priv->master) {
                ret = -EINVAL;
                goto out_unlock;
        }

        file_priv->minor->master = drm_master_get(file_priv->master);
        file_priv->is_master = 1;
        if (dev->driver->master_set) {
                ret = dev->driver->master_set(dev, file_priv, false);
                if (unlikely(ret != 0)) {
                        file_priv->is_master = 0;
                        drm_master_put(&file_priv->minor->master);
                }
        }

out_unlock:
        mutex_unlock(&dev->master_mutex);
        return ret;
}

int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        int ret = -EINVAL;

        mutex_lock(&dev->master_mutex);
        if (!file_priv->is_master)
                goto out_unlock;

        if (!file_priv->minor->master)
                goto out_unlock;

        ret = 0;
        if (dev->driver->master_drop)
                dev->driver->master_drop(dev, file_priv, false);
        drm_master_put(&file_priv->minor->master);
        file_priv->is_master = 0;

out_unlock:
        mutex_unlock(&dev->master_mutex);
        return ret;
}
/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */
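/*
 * Which slots end up populated depends on the driver features: dev->primary
 * is the legacy/card minor every device gets, dev->control is only allocated
 * for DRIVER_MODESET drivers, and dev->render only for DRIVER_RENDER drivers
 * when drm_rnodes is set (see drm_dev_alloc() below). In user space these
 * minors are assumed to surface as the usual /dev/dri/card*, /dev/dri/controlD*
 * and /dev/dri/renderD* character devices.
 */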
static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
                                             unsigned int type)
{
        switch (type) {
        case DRM_MINOR_LEGACY:
                return &dev->primary;
        case DRM_MINOR_RENDER:
                return &dev->render;
        case DRM_MINOR_CONTROL:
                return &dev->control;
        default:
                return NULL;
        }
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;

        minor = kzalloc(sizeof(*minor), GFP_KERNEL);
        if (!minor)
                return -ENOMEM;

        minor->type = type;
        minor->dev = dev;

        *drm_minor_get_slot(dev, type) = minor;
        return 0;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
        struct drm_minor **slot;

        slot = drm_minor_get_slot(dev, type);
        if (*slot) {
                kfree(*slot);
                *slot = NULL;
        }
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *new_minor;
        unsigned long flags;
        int ret;
        int minor_id;

        DRM_DEBUG("\n");

        new_minor = *drm_minor_get_slot(dev, type);
        if (!new_minor)
                return 0;

        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&drm_minor_lock, flags);
        minor_id = idr_alloc(&drm_minors_idr,
                             NULL,
                             64 * type,
                             64 * (type + 1),
                             GFP_NOWAIT);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        idr_preload_end();

        if (minor_id < 0)
                return minor_id;

        new_minor->index = minor_id;

        ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
        if (ret) {
                DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
                goto err_id;
        }

        ret = drm_sysfs_device_add(new_minor);
        if (ret) {
                DRM_ERROR("DRM: Error sysfs_device_add.\n");
                goto err_debugfs;
        }

        /* replace NULL with @minor so lookups will succeed from now on */
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_replace(&drm_minors_idr, new_minor, new_minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        DRM_DEBUG("new minor assigned %d\n", minor_id);
        return 0;

err_debugfs:
        drm_debugfs_cleanup(new_minor);
err_id:
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor_id);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        new_minor->index = 0;
        return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;

        minor = *drm_minor_get_slot(dev, type);
        if (!minor || !minor->kdev)
                return;

        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        minor->index = 0;

        drm_debugfs_cleanup(minor);
        drm_sysfs_device_remove(minor);
}
/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
        struct drm_minor *minor;
        unsigned long flags;

        spin_lock_irqsave(&drm_minor_lock, flags);
        minor = idr_find(&drm_minors_idr, minor_id);
        if (minor)
                drm_dev_ref(minor->dev);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        if (!minor) {
                return ERR_PTR(-ENODEV);
        } else if (drm_device_is_unplugged(minor->dev)) {
                drm_dev_unref(minor->dev);
                return ERR_PTR(-ENODEV);
        }

        return minor;
}
/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
        drm_dev_unref(minor->dev);
}
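/*
 * A hedged sketch of the acquire/release pattern, roughly what a caller such
 * as the DRM open path is assumed to do (error handling abbreviated):
 *
 *	struct drm_minor *minor;
 *
 *	minor = drm_minor_acquire(minor_id);
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	... use minor and minor->dev; both stay valid while held ...
 *	drm_minor_release(minor);
 */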
/**
 * Called via drm_exit() at module unload time or when a PCI device is
 * unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
        DRM_DEBUG("\n");

        if (!dev) {
                DRM_ERROR("cleanup called no dev\n");
                return;
        }

        drm_dev_unregister(dev);
        drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);
void drm_unplug_dev(struct drm_device *dev)
{
        /* for a USB device */
        drm_minor_unregister(dev, DRM_MINOR_LEGACY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);

        mutex_lock(&drm_global_mutex);

        drm_device_set_unplugged(dev);

        if (dev->open_count == 0) {
                drm_put_dev(dev);
        }
        mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);
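/*
 * Note on drm_unplug_dev(): the minors are torn down first so no new opens
 * can reach the device, but the device itself is only destroyed immediately
 * if nobody has it open. If open file handles remain, the device is merely
 * marked unplugged here; the final teardown is assumed to happen from the
 * file-release path once the last handle is closed.
 */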
/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
        .d_dname = simple_dname,
};

static const struct super_operations drm_fs_sops = {
        .statfs = simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
                                   const char *dev_name, void *data)
{
        return mount_pseudo(fs_type,
                            "drm:",
                            &drm_fs_sops,
                            &drm_fs_dops,
                            0x010203ff);
}

static struct file_system_type drm_fs_type = {
        .name = "drm",
        .owner = THIS_MODULE,
        .mount = drm_fs_mount,
        .kill_sb = kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
        struct inode *inode;
        int r;

        r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
        if (r < 0) {
                DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
                return ERR_PTR(r);
        }

        inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
        if (IS_ERR(inode))
                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

        return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
        if (inode) {
                iput(inode);
                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
        }
}
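/*
 * A hedged sketch of how the pair above is meant to be used (this mirrors
 * what drm_dev_alloc()/drm_dev_release() below actually do): every
 * drm_fs_inode_new() is balanced by exactly one drm_fs_inode_free(), which
 * both drops the inode reference and unpins the shared "drm" pseudo mount.
 *
 *	inode = drm_fs_inode_new();
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	... use the inode's address_space as the mapping backend ...
 *	drm_fs_inode_free(inode);
 */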
/**
 * drm_dev_alloc - Allocate new drm device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * RETURNS:
 * Pointer to new DRM device, or NULL if out of memory.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                                 struct device *parent)
{
        struct drm_device *dev;
        int ret;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return NULL;

        kref_init(&dev->ref);
        dev->dev = parent;
        dev->driver = driver;

        INIT_LIST_HEAD(&dev->filelist);
        INIT_LIST_HEAD(&dev->ctxlist);
        INIT_LIST_HEAD(&dev->vmalist);
        INIT_LIST_HEAD(&dev->maplist);
        INIT_LIST_HEAD(&dev->vblank_event_list);

        spin_lock_init(&dev->count_lock);
        spin_lock_init(&dev->event_lock);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->ctxlist_mutex);
        mutex_init(&dev->master_mutex);

        dev->anon_inode = drm_fs_inode_new();
        if (IS_ERR(dev->anon_inode)) {
                ret = PTR_ERR(dev->anon_inode);
                DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
                goto err_free;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
                if (ret)
                        goto err_minors;
        }

        if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
                ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
                if (ret)
                        goto err_minors;
        }

        ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
        if (ret)
                goto err_minors;

        if (drm_ht_create(&dev->map_hash, 12))
                goto err_minors;

        ret = drm_ctxbitmap_init(dev);
        if (ret) {
                DRM_ERROR("Cannot allocate memory for context bitmap.\n");
                goto err_ht;
        }

        if (driver->driver_features & DRIVER_GEM) {
                ret = drm_gem_init(dev);
                if (ret) {
                        DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
                        goto err_ctxbitmap;
                }
        }

        return dev;

err_ctxbitmap:
        drm_ctxbitmap_cleanup(dev);
err_ht:
        drm_ht_remove(&dev->map_hash);
err_minors:
        drm_minor_free(dev, DRM_MINOR_LEGACY);
        drm_minor_free(dev, DRM_MINOR_RENDER);
        drm_minor_free(dev, DRM_MINOR_CONTROL);
        drm_fs_inode_free(dev->anon_inode);
err_free:
        mutex_destroy(&dev->master_mutex);
        kfree(dev);
        return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);
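/*
 * A hedged sketch of the overall device life-cycle a driver is expected to
 * follow with the functions in this file (probe/remove stand in for whatever
 * bus hooks the driver actually uses):
 *
 *	probe:
 *		dev = drm_dev_alloc(&my_driver, parent);
 *		if (!dev)
 *			return -ENOMEM;
 *		ret = drm_dev_register(dev, flags);
 *		if (ret)
 *			drm_dev_unref(dev);
 *
 *	remove:
 *		drm_dev_unregister(dev);
 *		drm_dev_unref(dev);	(or simply drm_put_dev(dev))
 */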
static void drm_dev_release(struct kref *ref)
{
        struct drm_device *dev = container_of(ref, struct drm_device, ref);

        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_destroy(dev);

        drm_ctxbitmap_cleanup(dev);
        drm_ht_remove(&dev->map_hash);
        drm_fs_inode_free(dev->anon_inode);

        drm_minor_free(dev, DRM_MINOR_LEGACY);
        drm_minor_free(dev, DRM_MINOR_RENDER);
        drm_minor_free(dev, DRM_MINOR_CONTROL);

        kfree(dev->devname);

        mutex_destroy(&dev->master_mutex);
        kfree(dev);
}
/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
        if (dev)
                kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
        if (dev)
                kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);
/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's ->load() callback
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
 * previously.
 *
 * Never call this twice on any device!
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
        int ret;

        mutex_lock(&drm_global_mutex);

        ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
        if (ret)
                goto err_minors;

        ret = drm_minor_register(dev, DRM_MINOR_RENDER);
        if (ret)
                goto err_minors;

        ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
        if (ret)
                goto err_minors;

        if (dev->driver->load) {
                ret = dev->driver->load(dev, flags);
                if (ret)
                        goto err_minors;
        }

        /* setup grouping for legacy outputs */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = drm_mode_group_init_legacy_group(dev,
                                                       &dev->primary->mode_group);
                if (ret)
                        goto err_unload;
        }

        ret = 0;
        goto out_unlock;

err_unload:
        if (dev->driver->unload)
                dev->driver->unload(dev);
err_minors:
        drm_minor_unregister(dev, DRM_MINOR_LEGACY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
        mutex_unlock(&drm_global_mutex);
        return ret;
}
EXPORT_SYMBOL(drm_dev_register);
/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 */
void drm_dev_unregister(struct drm_device *dev)
{
        struct drm_map_list *r_list, *list_temp;

        drm_lastclose(dev);

        if (dev->driver->unload)
                dev->driver->unload(dev);

        if (dev->agp)
                drm_pci_agp_destroy(dev);

        drm_vblank_cleanup(dev);

        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
                drm_rmmap(dev, r_list->map);

        drm_minor_unregister(dev, DRM_MINOR_LEGACY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);