/* drm_pci.c -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
 * \file drm_pci.c
 * \brief Functions and ioctls to manage PCI memory
 *
 * \warning These interfaces aren't stable yet.
 *
 * \todo Implement the remaining ioctl's for the PCI pools.
 * \todo The wrappers here are so thin that they would be better off inlined.
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 * \author Leif Delgass <ldelgass@retinalburn.net>
 */

/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
/**********************************************************************/
/** \name PCI memory */
/*@{*/

/**
 * \brief Allocate a PCI consistent memory block, for DMA.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
	drm_dma_handle_t *dmah;
	unsigned long addr;
	size_t sz;

	/* pci_alloc_consistent only guarantees alignment to the smallest
	 * PAGE_SIZE order which is greater than or equal to the requested size.
	 * Return NULL here for now to make sure nobody tries for larger alignment
	 */
	if (align > size)
		return NULL;

	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
	if (!dmah)
		return NULL;

	dmah->size = size;
	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);

	if (dmah->vaddr == NULL) {
		kfree(dmah);
		return NULL;
	}

	memset(dmah->vaddr, 0, size);

	/* XXX - Is virt_to_page() legal for consistent mem? */
	/* Reserve */
	for (addr = (unsigned long)dmah->vaddr, sz = size;
	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
		SetPageReserved(virt_to_page((void *)addr));
	}

	return dmah;
}

EXPORT_SYMBOL(drm_pci_alloc);
/**
 * \brief Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	unsigned long addr;
	size_t sz;

	if (dmah->vaddr) {
		/* XXX - Is virt_to_page() legal for consistent mem? */
		/* Unreserve */
		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
			ClearPageReserved(virt_to_page((void *)addr));
		}
		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
				  dmah->busaddr);
	}
}

/**
 * \brief Free a PCI consistent memory block
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	__drm_pci_free(dev, dmah);
	kfree(dmah);
}

EXPORT_SYMBOL(drm_pci_free);
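/*
 * A minimal usage sketch for the two exported helpers above. The caller,
 * the 64 KiB size and the page alignment are assumptions for illustration,
 * not part of this file:
 *
 * \code
 *	drm_dma_handle_t *dmah;
 *
 *	// Allocate a zeroed, page-aligned, DMA-coherent 64 KiB buffer.
 *	dmah = drm_pci_alloc(dev, 64 * 1024, PAGE_SIZE);
 *	if (!dmah)
 *		return -ENOMEM;
 *
 *	// dmah->vaddr is the CPU mapping, dmah->busaddr the bus address
 *	// to hand to the device (how is driver-specific).
 *	...
 *
 *	// Free the coherent memory and the handle together.
 *	drm_pci_free(dev, dmah);
 * \endcode
 */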
#ifdef CONFIG_PCI

static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
	/* For historical reasons, drm_get_pci_domain() is busticated
	 * on most archs and has to remain so for userspace interface
	 * < 1.4, except on alpha which was right from the beginning
	 */
	if (dev->if_version < 0x10004)
		return 0;
#endif /* __alpha__ */

	return pci_domain_nr(dev->pdev->bus);
}

static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
	int len, ret;

	master->unique_len = 40;
	master->unique_size = master->unique_len;
	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
	if (master->unique == NULL)
		return -ENOMEM;

	len = snprintf(master->unique, master->unique_len,
		       "pci:%04x:%02x:%02x.%d",
		       drm_get_pci_domain(dev),
		       dev->pdev->bus->number,
		       PCI_SLOT(dev->pdev->devfn),
		       PCI_FUNC(dev->pdev->devfn));

	if (len >= master->unique_len) {
		DRM_ERROR("buffer overflow");
		ret = -EINVAL;
		goto err;
	} else
		master->unique_len = len;

	return 0;
err:
	return ret;
}
int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	int domain, bus, slot, func, ret;

	master->unique_len = u->unique_len;
	master->unique_size = u->unique_len + 1;
	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
	if (!master->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	master->unique[master->unique_len] = '\0';

	/* Return error if the busid submitted doesn't match the device's actual
	 * busid.
	 */
	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != drm_get_pci_domain(dev)) ||
	    (bus != dev->pdev->bus->number) ||
	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
	    (func != PCI_FUNC(dev->pdev->devfn))) {
		ret = -EINVAL;
		goto err;
	}

	return 0;
err:
	return ret;
}
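/*
 * A worked example of the legacy busid format parsed above (the concrete
 * numbers are made up for illustration): a device in PCI domain 0, bus 1,
 * slot 0, function 0 is submitted as the string "PCI:1:0:0".  Because the
 * domain is packed into the high bits of the bus field, domain 1, bus 2
 * would instead be encoded as "PCI:258:0:0" ((1 << 8) | 2 = 258).
 *
 * \code
 *	// Hypothetical kernel-side caller, assuming "u" was copied in from
 *	// a DRM_IOCTL_SET_UNIQUE request:
 *	ret = drm_pci_set_unique(dev, file_priv->master, u);
 *	if (ret)
 *		return ret;	// submitted busid did not match this device
 * \endcode
 */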
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pdev->bus->number ||
	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
		return -EINVAL;

	p->irq = dev->pdev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
		  p->irq);

	return 0;
}

/**
 * Get interrupt from bus id.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_irq_busid structure.
 * \return zero on success or a negative number on failure.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device that this DRM instance attached to.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_irq_busid *p = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/* UMS was only ever supported on PCI devices. */
	if (WARN_ON(!dev->pdev))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	return drm_pci_irq_by_busid(dev, p);
}
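/*
 * A minimal userspace sketch of the deprecated IOCTL above, assuming an
 * already-open DRM file descriptor "fd" on a legacy (non-KMS) device and
 * the uapi headers on the include path; the bus/slot/function numbers are
 * illustrative only:
 *
 * \code
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/drm.h>
 *
 *	struct drm_irq_busid p = {
 *		.busnum  = 1,	// domain in the high bits, bus in the low byte
 *		.devnum  = 0,	// PCI slot
 *		.funcnum = 0,	// PCI function
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p) == 0)
 *		printf("device uses IRQ %d\n", p.irq);
 * \endcode
 */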
static void drm_pci_agp_init(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
		if (drm_pci_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (dev->agp) {
			dev->agp->agp_mtrr = arch_phys_wc_add(
				dev->agp->agp_info.aper_base,
				dev->agp->agp_info.aper_size *
				1024 * 1024);
		}
	}
}

void drm_pci_agp_destroy(struct drm_device *dev)
{
	if (dev->agp) {
		arch_phys_wc_del(dev->agp->agp_mtrr);
		drm_agp_clear(dev);
		kfree(dev->agp);
		dev->agp = NULL;
	}
}

static struct drm_bus drm_pci_bus = {
	.set_busid = drm_pci_set_busid,
};
/**
 * Register.
 *
 * \param pdev - PCI device structure
 * \param ent entry from the PCI ID table with device type flags
 * \return zero on success or a negative number on failure.
 *
 * Attempt to get inter-module "drm" information. If we are first
 * then register the character device and inter-module information.
 * Try to register; if that fails, back out the previous work.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
		    struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	DRM_DEBUG("\n");

	dev = drm_dev_alloc(driver, &pdev->dev);
	if (!dev)
		return -ENOMEM;

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_free;

	dev->pdev = pdev;
#ifdef __alpha__
	dev->hose = pdev->sysdata;
#endif

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		pci_set_drvdata(pdev, dev);

	drm_pci_agp_init(dev);

	ret = drm_dev_register(dev, ent->driver_data);
	if (ret)
		goto err_agp;

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor, driver->patchlevel,
		 driver->date, pci_name(pdev), dev->primary->index);

	/* No locking needed since shadow-attach is single-threaded since it may
	 * only be called from the per-driver module init hook. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);

	return 0;

err_agp:
	drm_pci_agp_destroy(dev);
	pci_disable_device(pdev);
err_free:
	drm_dev_unref(dev);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
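/*
 * A minimal sketch of how a KMS driver would call drm_get_pci_dev() from
 * its PCI probe callback. The "foo" driver name, ID table and driver
 * structures are hypothetical:
 *
 * \code
 *	static int foo_pci_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *ent)
 *	{
 *		return drm_get_pci_dev(pdev, ent, &foo_drm_driver);
 *	}
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name     = "foo",
 *		.id_table = foo_pci_ids,
 *		.probe    = foo_pci_probe,
 *	};
 * \endcode
 */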
/**
 * PCI device initialization. Called directly from modules at load time.
 *
 * \return zero on success or a negative number on failure.
 *
 * Initializes a drm_device structure, registering the
 * stubs and initializing the AGP device.
 *
 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
 * after the initialization for driver customization.
 */
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *pid;
	int i;

	DRM_DEBUG("\n");

	driver->kdriver.pci = pdriver;
	driver->bus = &drm_pci_bus;

	if (driver->driver_features & DRIVER_MODESET)
		return pci_register_driver(pdriver);

	/* If not using KMS, fall back to stealth mode manual scanning. */
	INIT_LIST_HEAD(&driver->legacy_dev_list);
	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
		pid = &pdriver->id_table[i];

		/* Loop around setting up a DRM device for each PCI device
		 * matching our ID and device class.  If we had the internal
		 * function that pci_get_subsys and pci_get_class used, we'd
		 * be able to just pass pid in instead of doing a two-stage
		 * thing.
		 */
		pdev = NULL;
		while ((pdev =
			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
				       pid->subdevice, pdev)) != NULL) {
			if ((pdev->class & pid->class_mask) != pid->class)
				continue;

			/* stealth mode requires a manual probe */
			pci_dev_get(pdev);
			drm_get_pci_dev(pdev, pid, driver);
		}
	}
	return 0;
}
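/*
 * Continuing the hypothetical "foo" driver from the sketch above, module
 * load typically funnels into drm_pci_init() like this (a sketch, assuming
 * foo_drm_driver and foo_pci_driver are defined elsewhere in the driver):
 *
 * \code
 *	static int __init foo_init(void)
 *	{
 *		return drm_pci_init(&foo_drm_driver, &foo_pci_driver);
 *	}
 *	module_init(foo_init);
 * \endcode
 */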
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	struct pci_dev *root;
	u32 lnkcap, lnkcap2;

	*mask = 0;
	if (!dev->pdev)
		return -EINVAL;

	root = dev->pdev->bus->self;

	/* we've been informed that VIA and ServerWorks bridges don't make the cut */
	if (root->vendor == PCI_VENDOR_ID_VIA ||
	    root->vendor == PCI_VENDOR_ID_SERVERWORKS)
		return -EINVAL;

	pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
	pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);

	if (lnkcap2) {	/* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*mask |= DRM_PCIE_SPEED_80;
	} else {	/* pre-r3.0 */
		if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
			*mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
	}

	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
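/*
 * A minimal sketch of how a driver might consume the speed mask above to
 * decide whether faster PCIe transfer rates can be enabled. The caller and
 * the foo_enable_gen2/gen3 helpers are hypothetical, for illustration only:
 *
 * \code
 *	u32 mask;
 *
 *	if (drm_pcie_get_speed_cap_mask(dev, &mask) == 0) {
 *		if (mask & DRM_PCIE_SPEED_80)
 *			foo_enable_gen3(dev);	// 8.0 GT/s supported
 *		else if (mask & DRM_PCIE_SPEED_50)
 *			foo_enable_gen2(dev);	// 5.0 GT/s supported
 *	}
 * \endcode
 */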
#else

int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	return -1;
}

void drm_pci_agp_destroy(struct drm_device *dev) {}

int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	return -EINVAL;
}

int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	return -EINVAL;
}
#endif

EXPORT_SYMBOL(drm_pci_init);

/*@}*/
void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct drm_device *dev, *tmp;

	DRM_DEBUG("\n");

	if (driver->driver_features & DRIVER_MODESET) {
		pci_unregister_driver(pdriver);
	} else {
		list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
					 legacy_dev_list) {
			list_del(&dev->legacy_dev_list);
			drm_put_dev(dev);
		}
	}
	DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_pci_exit);
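/*
 * And the matching teardown for the hypothetical "foo" driver sketched
 * earlier: module unload hands both driver structures back to
 * drm_pci_exit(), which either unregisters the PCI driver (KMS) or walks
 * the legacy device list (UMS):
 *
 * \code
 *	static void __exit foo_exit(void)
 *	{
 *		drm_pci_exit(&foo_drm_driver, &foo_pci_driver);
 *	}
 *	module_exit(foo_exit);
 * \endcode
 */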