/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2014 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/acpi.h>

#include "ccp-dev.h"
  28. struct ccp_platform {
  29. int use_acpi;
  30. int coherent;
  31. };
  32. static int ccp_get_irq(struct ccp_device *ccp)
  33. {
  34. struct device *dev = ccp->dev;
  35. struct platform_device *pdev = container_of(dev,
  36. struct platform_device, dev);
  37. int ret;
  38. ret = platform_get_irq(pdev, 0);
  39. if (ret < 0)
  40. return ret;
  41. ccp->irq = ret;
  42. ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
  43. if (ret) {
  44. dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
  45. return ret;
  46. }
  47. return 0;
  48. }
  49. static int ccp_get_irqs(struct ccp_device *ccp)
  50. {
  51. struct device *dev = ccp->dev;
  52. int ret;
  53. ret = ccp_get_irq(ccp);
  54. if (!ret)
  55. return 0;
  56. /* Couldn't get an interrupt */
  57. dev_notice(dev, "could not enable interrupts (%d)\n", ret);
  58. return ret;
  59. }
  60. static void ccp_free_irqs(struct ccp_device *ccp)
  61. {
  62. struct device *dev = ccp->dev;
  63. free_irq(ccp->irq, dev);
  64. }
  65. static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
  66. {
  67. struct device *dev = ccp->dev;
  68. struct platform_device *pdev = container_of(dev,
  69. struct platform_device, dev);
  70. struct resource *ior;
  71. ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  72. if (ior && (resource_size(ior) >= 0x800))
  73. return ior;
  74. return NULL;
  75. }
  76. #ifdef CONFIG_ACPI
  77. static int ccp_acpi_support(struct ccp_device *ccp)
  78. {
  79. struct ccp_platform *ccp_platform = ccp->dev_specific;
  80. struct acpi_device *adev = ACPI_COMPANION(ccp->dev);
  81. acpi_handle handle;
  82. acpi_status status;
  83. unsigned long long data;
  84. int cca;
  85. /* Retrieve the device cache coherency value */
  86. handle = adev->handle;
  87. do {
  88. status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
  89. if (!ACPI_FAILURE(status)) {
  90. cca = data;
  91. break;
  92. }
  93. } while (!ACPI_FAILURE(status));
  94. if (ACPI_FAILURE(status)) {
  95. dev_err(ccp->dev, "error obtaining acpi coherency value\n");
  96. return -EINVAL;
  97. }
  98. ccp_platform->coherent = !!cca;
  99. return 0;
  100. }
  101. #else /* CONFIG_ACPI */
  102. static int ccp_acpi_support(struct ccp_device *ccp)
  103. {
  104. return -EINVAL;
  105. }
  106. #endif
  107. #ifdef CONFIG_OF
  108. static int ccp_of_support(struct ccp_device *ccp)
  109. {
  110. struct ccp_platform *ccp_platform = ccp->dev_specific;
  111. ccp_platform->coherent = of_dma_is_coherent(ccp->dev->of_node);
  112. return 0;
  113. }
  114. #else
  115. static int ccp_of_support(struct ccp_device *ccp)
  116. {
  117. return -EINVAL;
  118. }
  119. #endif
  120. static int ccp_platform_probe(struct platform_device *pdev)
  121. {
  122. struct ccp_device *ccp;
  123. struct ccp_platform *ccp_platform;
  124. struct device *dev = &pdev->dev;
  125. struct acpi_device *adev = ACPI_COMPANION(dev);
  126. struct resource *ior;
  127. int ret;
  128. ret = -ENOMEM;
  129. ccp = ccp_alloc_struct(dev);
  130. if (!ccp)
  131. goto e_err;
  132. ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL);
  133. if (!ccp_platform)
  134. goto e_err;
  135. ccp->dev_specific = ccp_platform;
  136. ccp->get_irq = ccp_get_irqs;
  137. ccp->free_irq = ccp_free_irqs;
  138. ccp_platform->use_acpi = (!adev || acpi_disabled) ? 0 : 1;
  139. ior = ccp_find_mmio_area(ccp);
  140. ccp->io_map = devm_ioremap_resource(dev, ior);
  141. if (IS_ERR(ccp->io_map)) {
  142. ret = PTR_ERR(ccp->io_map);
  143. goto e_err;
  144. }
  145. ccp->io_regs = ccp->io_map;
  146. if (!dev->dma_mask)
  147. dev->dma_mask = &dev->coherent_dma_mask;
  148. ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
  149. if (ret) {
  150. dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
  151. goto e_err;
  152. }
  153. if (ccp_platform->use_acpi)
  154. ret = ccp_acpi_support(ccp);
  155. else
  156. ret = ccp_of_support(ccp);
  157. if (ret)
  158. goto e_err;
  159. if (ccp_platform->coherent)
  160. ccp->axcache = CACHE_WB_NO_ALLOC;
  161. else
  162. ccp->axcache = CACHE_NONE;
  163. dev_set_drvdata(dev, ccp);
  164. ret = ccp_init(ccp);
  165. if (ret)
  166. goto e_err;
  167. dev_notice(dev, "enabled\n");
  168. return 0;
  169. e_err:
  170. dev_notice(dev, "initialization failed\n");
  171. return ret;
  172. }
  173. static int ccp_platform_remove(struct platform_device *pdev)
  174. {
  175. struct device *dev = &pdev->dev;
  176. struct ccp_device *ccp = dev_get_drvdata(dev);
  177. ccp_destroy(ccp);
  178. dev_notice(dev, "disabled\n");
  179. return 0;
  180. }
  181. #ifdef CONFIG_PM
  182. static int ccp_platform_suspend(struct platform_device *pdev,
  183. pm_message_t state)
  184. {
  185. struct device *dev = &pdev->dev;
  186. struct ccp_device *ccp = dev_get_drvdata(dev);
  187. unsigned long flags;
  188. unsigned int i;
  189. spin_lock_irqsave(&ccp->cmd_lock, flags);
  190. ccp->suspending = 1;
  191. /* Wake all the queue kthreads to prepare for suspend */
  192. for (i = 0; i < ccp->cmd_q_count; i++)
  193. wake_up_process(ccp->cmd_q[i].kthread);
  194. spin_unlock_irqrestore(&ccp->cmd_lock, flags);
  195. /* Wait for all queue kthreads to say they're done */
  196. while (!ccp_queues_suspended(ccp))
  197. wait_event_interruptible(ccp->suspend_queue,
  198. ccp_queues_suspended(ccp));
  199. return 0;
  200. }
  201. static int ccp_platform_resume(struct platform_device *pdev)
  202. {
  203. struct device *dev = &pdev->dev;
  204. struct ccp_device *ccp = dev_get_drvdata(dev);
  205. unsigned long flags;
  206. unsigned int i;
  207. spin_lock_irqsave(&ccp->cmd_lock, flags);
  208. ccp->suspending = 0;
  209. /* Wake up all the kthreads */
  210. for (i = 0; i < ccp->cmd_q_count; i++) {
  211. ccp->cmd_q[i].suspended = 0;
  212. wake_up_process(ccp->cmd_q[i].kthread);
  213. }
  214. spin_unlock_irqrestore(&ccp->cmd_lock, flags);
  215. return 0;
  216. }
  217. #endif
  218. #ifdef CONFIG_ACPI
  219. static const struct acpi_device_id ccp_acpi_match[] = {
  220. { "AMDI0C00", 0 },
  221. { },
  222. };
  223. #endif
  224. #ifdef CONFIG_OF
  225. static const struct of_device_id ccp_of_match[] = {
  226. { .compatible = "amd,ccp-seattle-v1a" },
  227. { },
  228. };
  229. #endif
  230. static struct platform_driver ccp_platform_driver = {
  231. .driver = {
  232. .name = "AMD Cryptographic Coprocessor",
  233. #ifdef CONFIG_ACPI
  234. .acpi_match_table = ccp_acpi_match,
  235. #endif
  236. #ifdef CONFIG_OF
  237. .of_match_table = ccp_of_match,
  238. #endif
  239. },
  240. .probe = ccp_platform_probe,
  241. .remove = ccp_platform_remove,
  242. #ifdef CONFIG_PM
  243. .suspend = ccp_platform_suspend,
  244. .resume = ccp_platform_resume,
  245. #endif
  246. };
  247. int ccp_platform_init(void)
  248. {
  249. return platform_driver_register(&ccp_platform_driver);
  250. }
  251. void ccp_platform_exit(void)
  252. {
  253. platform_driver_unregister(&ccp_platform_driver);
  254. }