ccp-pci.c

/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

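/*
 * Per-device PCI data: each device supports up to MSIX_VECTORS MSI-X
 * interrupts, and ccp_pci records how many were actually allocated
 * along with each vector number and the name it was requested under.
 */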
#define MSIX_VECTORS			2

struct ccp_msix {
	u32 vector;
	char name[16];
};

struct ccp_pci {
	int msix_count;
	struct ccp_msix msix[MSIX_VECTORS];
};

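/*
 * Allocate a range of 1..MSIX_VECTORS MSI-X vectors and request an IRQ
 * for each one, named "<device>-<index>".  On any request_irq() failure,
 * free the IRQs obtained so far and disable MSI-X again.
 */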
static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct msix_entry msix_entry[MSIX_VECTORS];
	unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
	int v, ret;

	for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
		msix_entry[v].entry = v;

	ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
	if (ret < 0)
		return ret;

	ccp_pci->msix_count = ret;
	for (v = 0; v < ccp_pci->msix_count; v++) {
		/* Set the interrupt names and request the irqs */
		snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
			 ccp->name, v);
		ccp_pci->msix[v].vector = msix_entry[v].vector;
		ret = request_irq(ccp_pci->msix[v].vector,
				  ccp->vdata->perform->irqhandler,
				  0, ccp_pci->msix[v].name, dev);
		if (ret) {
			dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
				   ret);
			goto e_irq;
		}
	}

	return 0;

e_irq:
	while (v--)
		free_irq(ccp_pci->msix[v].vector, dev);

	pci_disable_msix(pdev);

	ccp_pci->msix_count = 0;

	return ret;
}

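/*
 * Fall back to a single MSI interrupt: enable MSI on the device and
 * request the resulting IRQ, undoing the enable on failure.
 */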
static int ccp_get_msi_irq(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	ccp->irq = pdev->irq;
	ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
			  ccp->name, dev);
	if (ret) {
		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
		goto e_msi;
	}

	return 0;

e_msi:
	pci_disable_msi(pdev);

	return ret;
}

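/*
 * Interrupt setup entry point (installed as ccp->get_irq): prefer
 * MSI-X, then fall back to MSI.  No legacy INTx fallback is attempted.
 */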
static int ccp_get_irqs(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	int ret;

	ret = ccp_get_msix_irqs(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = ccp_get_msi_irq(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_notice(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}

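/*
 * Release whichever interrupt mode ccp_get_irqs() set up: all MSI-X
 * vectors if any were allocated, otherwise the single MSI IRQ.
 */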
static void ccp_free_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (ccp_pci->msix_count) {
		while (ccp_pci->msix_count--)
			free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
				 dev);
		pci_disable_msix(pdev);
	} else if (ccp->irq) {
		free_irq(ccp->irq, dev);
		pci_disable_msi(pdev);
	}
	ccp->irq = 0;
}

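/*
 * Validate the version-specific BAR: it must be a memory resource large
 * enough to hold the register block (0x800 bytes beyond the version's
 * register offset).  Returns the BAR index, or -EIO if it doesn't fit.
 */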
static int ccp_find_mmio_area(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	resource_size_t io_len;
	unsigned long io_flags;

	io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
	io_len = pci_resource_len(pdev, ccp->vdata->bar);
	if ((io_flags & IORESOURCE_MEM) &&
	    (io_len >= (ccp->vdata->offset + 0x800)))
		return ccp->vdata->bar;

	return -EIO;
}

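/*
 * Probe: allocate the device structure, validate the version data
 * attached to the matched PCI ID, claim and map the MMIO region,
 * configure the DMA mask (48-bit preferred, 32-bit fallback) and hand
 * off to the version-specific init routine.
 */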
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ccp_device *ccp;
	struct ccp_pci *ccp_pci;
	struct device *dev = &pdev->dev;
	unsigned int bar;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
	if (!ccp_pci)
		goto e_err;

	ccp->dev_specific = ccp_pci;
	ccp->vdata = (struct ccp_vdata *)id->driver_data;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
		goto e_err;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
		goto e_regions;
	}

	pci_set_master(pdev);

	ret = ccp_find_mmio_area(ccp);
	if (ret < 0)
		goto e_device;
	bar = ret;

	ret = -EIO;
	ccp->io_map = pci_iomap(pdev, bar, 0);
	if (!ccp->io_map) {
		dev_err(dev, "pci_iomap failed\n");
		goto e_device;
	}
	ccp->io_regs = ccp->io_map + ccp->vdata->offset;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto e_iomap;
		}
	}

	dev_set_drvdata(dev, ccp);

	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret)
		goto e_iomap;

	dev_notice(dev, "enabled\n");

	return 0;

e_iomap:
	pci_iounmap(pdev, ccp->io_map);

e_device:
	pci_disable_device(pdev);

e_regions:
	pci_release_regions(pdev);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}

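/*
 * Remove: tear down via the version-specific destroy routine, then
 * unmap and release the PCI resources in reverse order of probe.
 */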
static void ccp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);

	pci_iounmap(pdev, ccp->io_map);

	pci_disable_device(pdev);

	pci_release_regions(pdev);

	dev_notice(dev, "disabled\n");
}

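/*
 * Legacy PCI PM callbacks.  Suspend flags the device as suspending,
 * wakes the per-queue kthreads so they can park themselves, and waits
 * until every queue reports it is suspended; resume clears the flags
 * and wakes the kthreads back up.
 */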
#ifdef CONFIG_PM
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

static int ccp_pci_resume(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif

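/*
 * Supported devices; driver_data points at the per-version ccp_vdata
 * (the v3 data for 0x1537, v5 variants for 0x1456 and 0x1468).
 */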
static const struct pci_device_id ccp_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);

static struct pci_driver ccp_pci_driver = {
	.name = "ccp",
	.id_table = ccp_pci_table,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
#ifdef CONFIG_PM
	.suspend = ccp_pci_suspend,
	.resume = ccp_pci_resume,
#endif
};

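/*
 * Registration entry points.  These are non-static and this file has no
 * module_init/module_exit of its own, so they are presumably invoked
 * from the core ccp module's init/exit paths.
 */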
int ccp_pci_init(void)
{
	return pci_register_driver(&ccp_pci_driver);
}

void ccp_pci_exit(void)
{
	pci_unregister_driver(&ccp_pci_driver);
}