drm_pci.c

/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>

/**
 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
 * @dev: DRM device
 * @size: size of block to allocate
 * @align: alignment of block
 *
 * Return: A handle to the allocated memory block on success or NULL on
 * failure.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	drm_dma_handle_t *dmah;
	unsigned long addr;
	size_t sz;

	/* pci_alloc_consistent only guarantees alignment to the smallest
	 * PAGE_SIZE order which is greater than or equal to the requested size.
	 * Return NULL here for now to make sure nobody tries for larger alignment
	 */
	if (align > size)
		return NULL;

	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
	if (!dmah)
		return NULL;

	dmah->size = size;
	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
					 GFP_KERNEL | __GFP_COMP);

	if (dmah->vaddr == NULL) {
		kfree(dmah);
		return NULL;
	}

	memset(dmah->vaddr, 0, size);

	/* XXX - Is virt_to_page() legal for consistent mem? */
	/* Reserve */
	for (addr = (unsigned long)dmah->vaddr, sz = size;
	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
		SetPageReserved(virt_to_page((void *)addr));
	}

	return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);
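
/*
 * Illustrative usage sketch (not part of this file): a legacy driver might
 * allocate a page-aligned DMA buffer for a ring or status page and program
 * the hardware with its bus address.  The register write below is a
 * hypothetical placeholder.
 *
 *	drm_dma_handle_t *dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 *	if (!dmah)
 *		return -ENOMEM;
 *	write_hw_ring_base(dev, dmah->busaddr);	// hypothetical helper
 *	...
 *	drm_pci_free(dev, dmah);		// releases memory and handle
 */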

/*
 * Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
	unsigned long addr;
	size_t sz;

	if (dmah->vaddr) {
		/* XXX - Is virt_to_page() legal for consistent mem? */
		/* Unreserve */
		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
			ClearPageReserved(virt_to_page((void *)addr));
		}
		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
				  dmah->busaddr);
	}
}

/**
 * drm_pci_free - Free a PCI consistent memory block
 * @dev: DRM device
 * @dmah: handle to memory block
 */
void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
	__drm_pci_free(dev, dmah);
	kfree(dmah);
}
EXPORT_SYMBOL(drm_pci_free);

#ifdef CONFIG_PCI

static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
	/* For historical reasons, drm_get_pci_domain() is busticated
	 * on most archs and has to remain so for userspace interface
	 * < 1.4, except on alpha which was right from the beginning
	 */
	if (dev->if_version < 0x10004)
		return 0;
#endif /* __alpha__ */

	return pci_domain_nr(dev->pdev->bus);
}

static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
	int len, ret;

	master->unique_len = 40;
	master->unique_size = master->unique_len;
	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
	if (master->unique == NULL)
		return -ENOMEM;

	len = snprintf(master->unique, master->unique_len,
		       "pci:%04x:%02x:%02x.%d",
		       drm_get_pci_domain(dev),
		       dev->pdev->bus->number,
		       PCI_SLOT(dev->pdev->devfn),
		       PCI_FUNC(dev->pdev->devfn));

	if (len >= master->unique_len) {
		DRM_ERROR("buffer overflow");
		ret = -EINVAL;
		goto err;
	} else
		master->unique_len = len;

	return 0;
err:
	return ret;
}
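
/*
 * For example (illustrative values, depends on the machine), a device in
 * domain 0, bus 1, slot 0, function 0 gets the bus ID string
 * "pci:0000:01:00.0", which userspace (e.g. libdrm) can use to identify and
 * open this particular DRM device.
 */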

int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	int domain, bus, slot, func, ret;

	master->unique_len = u->unique_len;
	master->unique_size = u->unique_len + 1;
	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
	if (!master->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	master->unique[master->unique_len] = '\0';

	/* Return error if the busid submitted doesn't match the device's actual
	 * busid.
	 */
	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != drm_get_pci_domain(dev)) ||
	    (bus != dev->pdev->bus->number) ||
	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
	    (func != PCI_FUNC(dev->pdev->devfn))) {
		ret = -EINVAL;
		goto err;
	}

	return 0;
err:
	return ret;
}
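
/*
 * Note on the format accepted above: the legacy SET_UNIQUE string is
 * "PCI:bus:slot:func", with the PCI domain packed into bits 8 and up of the
 * bus value.  So, for example (illustrative), "PCI:1:0:0" decodes to domain
 * 0, bus 1, slot 0, function 0 and must match the device this DRM instance
 * is bound to, otherwise -EINVAL is returned.
 */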

static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pdev->bus->number ||
	    p->devnum != PCI_SLOT(dev->pdev->devfn) ||
	    p->funcnum != PCI_FUNC(dev->pdev->devfn))
		return -EINVAL;

	p->irq = dev->pdev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
		  p->irq);

	return 0;
}

/**
 * drm_irq_by_busid - Get interrupt from bus ID
 * @dev: DRM device
 * @data: IOCTL parameter pointing to a drm_irq_busid structure
 * @file_priv: DRM file private.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device that this DRM instance is attached to.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_irq_busid *p = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/* UMS was only ever supported on PCI devices. */
	if (WARN_ON(!dev->pdev))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	return drm_pci_irq_by_busid(dev, p);
}

static void drm_pci_agp_init(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
		if (drm_pci_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (dev->agp) {
			dev->agp->agp_mtrr = arch_phys_wc_add(
				dev->agp->agp_info.aper_base,
				dev->agp->agp_info.aper_size *
				1024 * 1024);
		}
	}
}

void drm_pci_agp_destroy(struct drm_device *dev)
{
	if (dev->agp) {
		arch_phys_wc_del(dev->agp->agp_mtrr);
		drm_agp_clear(dev);
		kfree(dev->agp);
		dev->agp = NULL;
	}
}

static struct drm_bus drm_pci_bus = {
	.set_busid = drm_pci_set_busid,
};

/**
 * drm_get_pci_dev - Register a PCI device with the DRM subsystem
 * @pdev: PCI device
 * @ent: entry from the PCI ID table that matches @pdev
 * @driver: DRM device driver
 *
 * Attempts to get inter-module "drm" information. If we are first,
 * then register the character device and inter-module information.
 * Try to register; if registration fails, back out the previous work.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
		    struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	DRM_DEBUG("\n");

	dev = drm_dev_alloc(driver, &pdev->dev);
	if (!dev)
		return -ENOMEM;

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_free;

	dev->pdev = pdev;
#ifdef __alpha__
	dev->hose = pdev->sysdata;
#endif

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		pci_set_drvdata(pdev, dev);

	drm_pci_agp_init(dev);

	ret = drm_dev_register(dev, ent->driver_data);
	if (ret)
		goto err_agp;

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor, driver->patchlevel,
		 driver->date, pci_name(pdev), dev->primary->index);

	/* No locking needed since shadow-attach is single-threaded since it may
	 * only be called from the per-driver module init hook. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);

	return 0;

err_agp:
	drm_pci_agp_destroy(dev);
	pci_disable_device(pdev);
err_free:
	drm_dev_unref(dev);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
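
/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * modesetting driver of this era typically calls drm_get_pci_dev() from its
 * PCI probe callback and tears the device down with drm_put_dev() on removal.
 *
 *	static int foo_pci_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *ent)
 *	{
 *		return drm_get_pci_dev(pdev, ent, &foo_drm_driver);
 *	}
 *
 *	static void foo_pci_remove(struct pci_dev *pdev)
 *	{
 *		drm_put_dev(pci_get_drvdata(pdev));
 *	}
 */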

/**
 * drm_pci_init - Register matching PCI devices with the DRM subsystem
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * Initializes drm_device structures, registering the stubs and initializing
 * the AGP device.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *pid;
	int i;

	DRM_DEBUG("\n");

	driver->bus = &drm_pci_bus;

	if (driver->driver_features & DRIVER_MODESET)
		return pci_register_driver(pdriver);

	/* If not using KMS, fall back to stealth mode manual scanning. */
	INIT_LIST_HEAD(&driver->legacy_dev_list);
	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
		pid = &pdriver->id_table[i];

		/* Loop around setting up a DRM device for each PCI device
		 * matching our ID and device class.  If we had the internal
		 * function that pci_get_subsys and pci_get_class used, we'd
		 * be able to just pass pid in instead of doing a two-stage
		 * thing.
		 */
		pdev = NULL;
		while ((pdev =
			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
				       pid->subdevice, pdev)) != NULL) {
			if ((pdev->class & pid->class_mask) != pid->class)
				continue;

			/* stealth mode requires a manual probe */
			pci_dev_get(pdev);
			drm_get_pci_dev(pdev, pid, driver);
		}
	}
	return 0;
}
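
/*
 * Illustrative sketch (hypothetical names): drivers usually pair
 * drm_pci_init() and drm_pci_exit() in their module init/exit hooks.
 *
 *	static int __init foo_init(void)
 *	{
 *		return drm_pci_init(&foo_drm_driver, &foo_pci_driver);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		drm_pci_exit(&foo_drm_driver, &foo_pci_driver);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */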

int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	struct pci_dev *root;
	u32 lnkcap, lnkcap2;

	*mask = 0;
	if (!dev->pdev)
		return -EINVAL;

	root = dev->pdev->bus->self;

	/* we've been informed VIA and ServerWorks chipsets don't make the cut */
	if (root->vendor == PCI_VENDOR_ID_VIA ||
	    root->vendor == PCI_VENDOR_ID_SERVERWORKS)
		return -EINVAL;

	pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
	pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);

	if (lnkcap2) {	/* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*mask |= DRM_PCIE_SPEED_80;
	} else {	/* pre-r3.0 */
		if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
			*mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
	}

	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n",
		 root->vendor, root->device, lnkcap, lnkcap2);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
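
/*
 * Illustrative usage (not part of this file): a driver deciding whether the
 * link can be trained to 5.0 GT/s might do something like the following.
 *
 *	u32 mask;
 *
 *	if (drm_pcie_get_speed_cap_mask(dev, &mask) == 0 &&
 *	    (mask & DRM_PCIE_SPEED_50))
 *		enable_gen2_link(dev);	// hypothetical driver helper
 */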

#else

int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	return -1;
}

void drm_pci_agp_destroy(struct drm_device *dev) {}

int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	return -EINVAL;
}

int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	return -EINVAL;
}
#endif

EXPORT_SYMBOL(drm_pci_init);

/**
 * drm_pci_exit - Unregister matching PCI devices from the DRM subsystem
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * Unregisters one or more devices matched by a PCI driver from the DRM
 * subsystem.
 */
void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct drm_device *dev, *tmp;

	DRM_DEBUG("\n");

	if (driver->driver_features & DRIVER_MODESET) {
		pci_unregister_driver(pdriver);
	} else {
		list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
					 legacy_dev_list) {
			list_del(&dev->legacy_dev_list);
			drm_put_dev(dev);
		}
	}
	DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_pci_exit);