/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

#define IO_BAR			2
#define MSIX_VECTORS		2

struct ccp_msix {
	u32 vector;
	char name[16];
};

struct ccp_pci {
	int msix_count;
	struct ccp_msix msix[MSIX_VECTORS];
};
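
/*
 * Enable MSI-X and request an IRQ for each vector granted.  Note that
 * pci_enable_msix() returns a positive count when fewer vectors than
 * requested are available, so the loop retries with the reduced count.
 */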
static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	struct msix_entry msix_entry[MSIX_VECTORS];
	unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
	int v, ret;

	for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
		msix_entry[v].entry = v;

	while ((ret = pci_enable_msix(pdev, msix_entry, v)) > 0)
		v = ret;
	if (ret)
		return ret;

	ccp_pci->msix_count = v;
	for (v = 0; v < ccp_pci->msix_count; v++) {
		/* Set the interrupt names and request the irqs */
		snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
		ccp_pci->msix[v].vector = msix_entry[v].vector;
		ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
				  0, ccp_pci->msix[v].name, dev);
		if (ret) {
			dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
				   ret);
			goto e_irq;
		}
	}

	return 0;

e_irq:
	while (v--)
		free_irq(ccp_pci->msix[v].vector, dev);

	pci_disable_msix(pdev);

	ccp_pci->msix_count = 0;

	return ret;
}
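
/* Fallback path: enable a single MSI interrupt and request its IRQ. */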
static int ccp_get_msi_irq(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
	if (ret) {
		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
		goto e_msi;
	}

	return 0;

e_msi:
	pci_disable_msi(pdev);

	return ret;
}
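
/*
 * Interrupt setup entry point: prefer MSI-X and fall back to plain MSI
 * if the MSI-X vectors cannot be obtained.
 */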
static int ccp_get_irqs(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	int ret;

	ret = ccp_get_msix_irqs(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = ccp_get_msi_irq(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_notice(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}
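
/* Release whichever interrupt mode ccp_get_irqs() managed to set up. */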
static void ccp_free_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);

	if (ccp_pci->msix_count) {
		while (ccp_pci->msix_count--)
			free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
				 dev);
		pci_disable_msix(pdev);
	} else {
		free_irq(pdev->irq, dev);
		pci_disable_msi(pdev);
	}
}
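
/*
 * Locate a memory BAR large enough to hold the CCP register block
 * (IO_OFFSET + 0x800 bytes).  BAR 2 is the expected location; the other
 * standard BARs are scanned as a fallback.
 */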
static int ccp_find_mmio_area(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	resource_size_t io_len;
	unsigned long io_flags;
	int bar;

	io_flags = pci_resource_flags(pdev, IO_BAR);
	io_len = pci_resource_len(pdev, IO_BAR);
	if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
		return IO_BAR;

	for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
		io_flags = pci_resource_flags(pdev, bar);
		io_len = pci_resource_len(pdev, bar);
		if ((io_flags & IORESOURCE_MEM) &&
		    (io_len >= (IO_OFFSET + 0x800)))
			return bar;
	}

	return -EIO;
}
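
/*
 * Probe: allocate the device structures, map the MMIO registers, set the
 * DMA masks (48-bit preferred, 32-bit fallback) and hand off to
 * ccp_init().  The error labels unwind in reverse order of setup.
 */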
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ccp_device *ccp;
	struct ccp_pci *ccp_pci;
	struct device *dev = &pdev->dev;
	unsigned int bar;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	ccp_pci = kzalloc(sizeof(*ccp_pci), GFP_KERNEL);
	if (!ccp_pci) {
		ret = -ENOMEM;
		goto e_free1;
	}
	ccp->dev_specific = ccp_pci;
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
		goto e_free2;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
		goto e_regions;
	}

	pci_set_master(pdev);

	ret = ccp_find_mmio_area(ccp);
	if (ret < 0)
		goto e_device;
	bar = ret;

	ret = -EIO;
	ccp->io_map = pci_iomap(pdev, bar, 0);
	if (ccp->io_map == NULL) {
		dev_err(dev, "pci_iomap failed\n");
		goto e_device;
	}
	ccp->io_regs = ccp->io_map + IO_OFFSET;

	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret == 0) {
		ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
		if (ret) {
			dev_err(dev, "dma_set_coherent_mask failed (%d)\n",
				ret);
			goto e_bar0;
		}
	} else {
		ret = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask failed (%d)\n", ret);
			goto e_bar0;
		}
	}

	dev_set_drvdata(dev, ccp);

	ret = ccp_init(ccp);
	if (ret)
		goto e_bar0;

	dev_notice(dev, "enabled\n");

	return 0;

e_bar0:
	pci_iounmap(pdev, ccp->io_map);

e_device:
	pci_disable_device(pdev);

e_regions:
	pci_release_regions(pdev);

e_free2:
	kfree(ccp_pci);

e_free1:
	kfree(ccp);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}
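
/* Tear down everything that ccp_pci_probe() set up. */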
static void ccp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);

	if (!ccp)
		return;

	ccp_destroy(ccp);

	pci_iounmap(pdev, ccp->io_map);

	pci_disable_device(pdev);

	pci_release_regions(pdev);

	kfree(ccp);

	dev_notice(dev, "disabled\n");
}

#ifdef CONFIG_PM
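/*
 * Suspend: flag the device as suspending, wake every command-queue
 * kthread so it can park itself, then wait until all queues report that
 * they are suspended.
 */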
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}
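
/* Resume: clear the suspend flags and restart the command-queue kthreads. */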
static int ccp_pci_resume(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif
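
/* PCI device IDs supported by this driver. */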
static DEFINE_PCI_DEVICE_TABLE(ccp_pci_table) = {
	{ PCI_VDEVICE(AMD, 0x1537), },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);

static struct pci_driver ccp_pci_driver = {
	.name = "AMD Cryptographic Coprocessor",
	.id_table = ccp_pci_table,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
#ifdef CONFIG_PM
	.suspend = ccp_pci_suspend,
	.resume = ccp_pci_resume,
#endif
};
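
/*
 * Registration hooks: this file has no module_init()/module_exit() of
 * its own, so these are called from the CCP core module code instead.
 */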
int ccp_pci_init(void)
{
	return pci_register_driver(&ccp_pci_driver);
}

void ccp_pci_exit(void)
{
	pci_unregister_driver(&ccp_pci_driver);
}