/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drm_pci.h>
#include <drm/drmP.h>

#include "drm_internal.h"
#include "drm_legacy.h"
/**
 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
 * @dev: DRM device
 * @size: size of block to allocate
 * @align: alignment of block
 *
 * FIXME: This is a needless abstraction of the Linux dma-api and should be
 * removed.
 *
 * Return: A handle to the allocated memory block on success or NULL on
 * failure.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
        drm_dma_handle_t *dmah;
        unsigned long addr;
        size_t sz;

        /* pci_alloc_consistent only guarantees alignment to the smallest
         * PAGE_SIZE order which is greater than or equal to the requested size.
         * Return NULL here for now to make sure nobody tries for larger alignment
         */
        if (align > size)
                return NULL;

        dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
        if (!dmah)
                return NULL;

        dmah->size = size;
        dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
                                         GFP_KERNEL | __GFP_COMP);

        if (dmah->vaddr == NULL) {
                kfree(dmah);
                return NULL;
        }

        memset(dmah->vaddr, 0, size);

        /* XXX - Is virt_to_page() legal for consistent mem? */
        /* Reserve */
        for (addr = (unsigned long)dmah->vaddr, sz = size;
             sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
                SetPageReserved(virt_to_page((void *)addr));
        }

        return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);
/*
 * Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
        unsigned long addr;
        size_t sz;

        if (dmah->vaddr) {
                /* XXX - Is virt_to_page() legal for consistent mem? */
                /* Unreserve */
                for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
                     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
                        ClearPageReserved(virt_to_page((void *)addr));
                }
                dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
                                  dmah->busaddr);
        }
}

/**
 * drm_pci_free - Free a PCI consistent memory block
 * @dev: DRM device
 * @dmah: handle to memory block
 *
 * FIXME: This is a needless abstraction of the Linux dma-api and should be
 * removed.
 */
void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
        __drm_legacy_pci_free(dev, dmah);
        kfree(dmah);
}
EXPORT_SYMBOL(drm_pci_free);
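
/*
 * Usage sketch (illustrative only, not part of the original file): a legacy
 * driver could use drm_pci_alloc()/drm_pci_free() above to manage a small
 * DMA-coherent scratch buffer. The device pointer "ddev" and the helper name
 * are placeholders for this example.
 */
#if 0
static int example_alloc_scratch(struct drm_device *ddev)
{
        drm_dma_handle_t *dmah;

        /* one page, page-aligned; the alignment must not exceed the size */
        dmah = drm_pci_alloc(ddev, PAGE_SIZE, PAGE_SIZE);
        if (!dmah)
                return -ENOMEM;

        /* dmah->vaddr is the kernel mapping, dmah->busaddr the bus address */
        memset(dmah->vaddr, 0xa5, PAGE_SIZE);

        drm_pci_free(ddev, dmah);
        return 0;
}
#endif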

#ifdef CONFIG_PCI

static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
        /* For historical reasons, drm_get_pci_domain() is busticated
         * on most archs and has to remain so for userspace interface
         * < 1.4, except on alpha which was right from the beginning
         */
        if (dev->if_version < 0x10004)
                return 0;
#endif /* __alpha__ */

        return pci_domain_nr(dev->pdev->bus);
}

int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
        master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
                                   drm_get_pci_domain(dev),
                                   dev->pdev->bus->number,
                                   PCI_SLOT(dev->pdev->devfn),
                                   PCI_FUNC(dev->pdev->devfn));
        if (!master->unique)
                return -ENOMEM;

        master->unique_len = strlen(master->unique);
        return 0;
}

static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
        if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
            (p->busnum & 0xff) != dev->pdev->bus->number ||
            p->devnum != PCI_SLOT(dev->pdev->devfn) ||
            p->funcnum != PCI_FUNC(dev->pdev->devfn))
                return -EINVAL;

        p->irq = dev->pdev->irq;

        DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
                  p->irq);
        return 0;
}

/**
 * drm_irq_by_busid - Get interrupt from bus ID
 * @dev: DRM device
 * @data: IOCTL parameter pointing to a drm_irq_busid structure
 * @file_priv: DRM file private.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device that this DRM instance is attached to.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_irq_busid *p = data;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;

        /* UMS was only ever supported on PCI devices. */
        if (WARN_ON(!dev->pdev))
                return -EINVAL;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;

        return drm_pci_irq_by_busid(dev, p);
}

static void drm_pci_agp_init(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
                if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
                        dev->agp = drm_agp_init(dev);
                if (dev->agp) {
                        dev->agp->agp_mtrr = arch_phys_wc_add(
                                dev->agp->agp_info.aper_base,
                                dev->agp->agp_info.aper_size *
                                1024 * 1024);
                }
        }
}

void drm_pci_agp_destroy(struct drm_device *dev)
{
        if (dev->agp) {
                arch_phys_wc_del(dev->agp->agp_mtrr);
                drm_legacy_agp_clear(dev);
                kfree(dev->agp);
                dev->agp = NULL;
        }
}

/**
 * drm_get_pci_dev - Register a PCI device with the DRM subsystem
 * @pdev: PCI device
 * @ent: entry from the PCI ID table that matches @pdev
 * @driver: DRM device driver
 *
 * Attempts to get inter-module "drm" information. If we are first,
 * register the character device and inter-module information.
 * Try and register; if we fail to register, back out previous work.
 *
 * NOTE: This function is deprecated, please use drm_dev_alloc() and
 * drm_dev_register() instead and remove your &drm_driver.load callback.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                    struct drm_driver *driver)
{
        struct drm_device *dev;
        int ret;

        DRM_DEBUG("\n");

        dev = drm_dev_alloc(driver, &pdev->dev);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        ret = pci_enable_device(pdev);
        if (ret)
                goto err_free;

        dev->pdev = pdev;
#ifdef __alpha__
        dev->hose = pdev->sysdata;
#endif

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                pci_set_drvdata(pdev, dev);

        drm_pci_agp_init(dev);

        ret = drm_dev_register(dev, ent->driver_data);
        if (ret)
                goto err_agp;

        /* No locking needed since shadow-attach is single-threaded: it may
         * only be called from the per-driver module init hook. */
        if (drm_core_check_feature(dev, DRIVER_LEGACY))
                list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);

        return 0;

err_agp:
        drm_pci_agp_destroy(dev);
        pci_disable_device(pdev);
err_free:
        drm_dev_unref(dev);
        return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
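
/*
 * Usage sketch (illustrative only): a driver that has not yet converted to
 * drm_dev_alloc()/drm_dev_register() would call drm_get_pci_dev() from its
 * pci_driver probe callback, and tear the device down with drm_put_dev() on
 * remove. "example_driver" is a placeholder struct drm_driver.
 */
#if 0
static int example_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &example_driver);
}

static void example_pci_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}
#endif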

/**
 * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * This is deprecated and only used by legacy dri1 drivers.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
        struct pci_dev *pdev = NULL;
        const struct pci_device_id *pid;
        int i;

        DRM_DEBUG("\n");

        if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
                return -EINVAL;

        /* If not using KMS, fall back to stealth mode manual scanning. */
        INIT_LIST_HEAD(&driver->legacy_dev_list);
        for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
                pid = &pdriver->id_table[i];

                /* Loop around setting up a DRM device for each PCI device
                 * matching our ID and device class. If we had the internal
                 * function that pci_get_subsys and pci_get_class used, we'd
                 * be able to just pass pid in instead of doing a two-stage
                 * thing.
                 */
                pdev = NULL;
                while ((pdev =
                        pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
                                       pid->subdevice, pdev)) != NULL) {
                        if ((pdev->class & pid->class_mask) != pid->class)
                                continue;

                        /* stealth mode requires a manual probe */
                        pci_dev_get(pdev);
                        drm_get_pci_dev(pdev, pid, driver);
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_legacy_pci_init);

int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
        struct pci_dev *root;
        u32 lnkcap, lnkcap2;

        *mask = 0;
        if (!dev->pdev)
                return -EINVAL;

        root = dev->pdev->bus->self;

        /* we've been informed that VIA and ServerWorks bridges don't make the cut */
        if (root->vendor == PCI_VENDOR_ID_VIA ||
            root->vendor == PCI_VENDOR_ID_SERVERWORKS)
                return -EINVAL;

        pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
        pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);

        if (lnkcap2) {  /* PCIe r3.0-compliant */
                if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
                        *mask |= DRM_PCIE_SPEED_25;
                if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
                        *mask |= DRM_PCIE_SPEED_50;
                if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
                        *mask |= DRM_PCIE_SPEED_80;
        } else {        /* pre-r3.0 */
                if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
                        *mask |= DRM_PCIE_SPEED_25;
                if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
                        *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
        }

        DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n",
                 root->vendor, root->device, lnkcap, lnkcap2);
        return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);

int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw)
{
        struct pci_dev *root;
        u32 lnkcap;

        *mlw = 0;
        if (!dev->pdev)
                return -EINVAL;

        root = dev->pdev->bus->self;

        pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);

        *mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

        DRM_INFO("probing mlw for device %x:%x = %x\n",
                 root->vendor, root->device, lnkcap);
        return 0;
}
EXPORT_SYMBOL(drm_pcie_get_max_link_width);
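
/*
 * Usage sketch (illustrative only): a driver probing its PCIe link could use
 * the two helpers above, e.g. to decide whether the root port supports a
 * gen3 (8.0 GT/s) link. The DRM_PCIE_SPEED_* flags are the ones reported by
 * drm_pcie_get_speed_cap_mask(); "ddev" is an assumed device pointer.
 */
#if 0
static void example_report_pcie_caps(struct drm_device *ddev)
{
        u32 speed_mask = 0, lanes = 0;

        if (drm_pcie_get_speed_cap_mask(ddev, &speed_mask) == 0 &&
            (speed_mask & DRM_PCIE_SPEED_80))
                DRM_INFO("root port supports 8.0 GT/s\n");

        if (drm_pcie_get_max_link_width(ddev, &lanes) == 0)
                DRM_INFO("max link width: x%u\n", lanes);
}
#endif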

#else

void drm_pci_agp_destroy(struct drm_device *dev) {}

int drm_irq_by_busid(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        return -EINVAL;
}
#endif

/**
 * drm_legacy_pci_exit - unregister shadow-attached legacy DRM driver
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
 * is deprecated and only used by dri1 drivers.
 */
void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
        struct drm_device *dev, *tmp;

        DRM_DEBUG("\n");

        if (!(driver->driver_features & DRIVER_LEGACY)) {
                WARN_ON(1);
        } else {
                list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
                                         legacy_dev_list) {
                        list_del(&dev->legacy_dev_list);
                        drm_put_dev(dev);
                }
        }

        DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_legacy_pci_exit);
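
/*
 * Usage sketch (illustrative only): a legacy (dri1) driver shadow-attaches
 * through drm_legacy_pci_init()/drm_legacy_pci_exit() from its module init
 * and exit hooks. "example_driver" and "example_pci_driver" are placeholder
 * structures for this example.
 */
#if 0
static int __init example_init(void)
{
        return drm_legacy_pci_init(&example_driver, &example_pci_driver);
}

static void __exit example_exit(void)
{
        drm_legacy_pci_exit(&example_driver, &example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
#endif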