/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
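
/*
 * Bring the GPU out of reset: enable the VDD rail and both clocks,
 * then remove the 3D power-gate clamp before releasing the reset
 * line. The udelay()s give each step time to settle.
 */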
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	ret = regulator_enable(tdev->vdd);
	if (ret)
		goto err_power;

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	reset_control_assert(tdev->rst);
	udelay(10);

	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
	if (ret)
		goto err_clamp;
	udelay(10);

	reset_control_deassert(tdev->rst);
	udelay(10);

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	clk_disable_unprepare(tdev->clk);
err_clk:
	regulator_disable(tdev->vdd);
err_power:
	return ret;
}
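
/*
 * Inverse of nvkm_device_tegra_power_up(): put the GPU back into
 * reset, stop both clocks and cut the VDD rail.
 */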
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	reset_control_assert(tdev->rst);
	udelay(10);

	clk_disable_unprepare(tdev->clk_pwr);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	return regulator_disable(tdev->vdd);
}
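
/*
 * If the platform bus sits behind an IOMMU, allocate a domain, pick a
 * page shift compatible with the system's PAGE_SIZE, attach the GPU
 * and set up an allocator covering the 40-bit IOVA space. On any
 * failure the domain is left NULL and the driver falls back to
 * non-IOMMU operation.
 */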
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		/* iommu_domain_alloc() returns NULL on failure, not ERR_PTR */
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * or equal to the system's PAGE_SIZE, with a preference if
		 * both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
				   (1ULL << 40) >> tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);
free_domain:
	iommu_domain_free(tdev->iommu.domain);
error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
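
/* Undo nvkm_device_tegra_probe_iommu(), if a domain was created. */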
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}
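
/* Recover the Tegra wrapper from an embedded nvkm_device pointer. */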
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}
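
/* Map nvkm "BAR" queries onto the platform device's MMIO resources. */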
static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}
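
/*
 * Top-half IRQ handler: disarm the MC interrupt, dispatch anything
 * pending through the MC subdev, then re-arm.
 */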
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_mc *mc = tdev->device.mc;
	bool handled = false;

	if (likely(mc)) {
		nvkm_mc_intr_unarm(mc);
		nvkm_mc_intr(mc, &handled);
		nvkm_mc_intr_rearm(mc);
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
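
/* Release the "stall" interrupt on suspend/teardown. */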
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);

	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
}
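
/* Look up and request the GPU's "stall" interrupt. */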
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}
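
/* Destructor: power the GPU down and release any IOMMU resources. */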
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}
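
/* Hooks the Tegra platform device into the common nvkm device core. */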
static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};
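
/*
 * Allocate and construct a Tegra-attached nvkm device: acquire the
 * VDD regulator, GPU reset line and clocks, probe the IOMMU, power
 * the GPU up and build the core device. Every step is unwound on
 * failure so nothing leaks.
 */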
int
nvkm_device_tegra_new(struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->pdev = pdev;
	tdev->irq = -1;

	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(tdev->vdd)) {
		ret = PTR_ERR(tdev->vdd);
		goto free;
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	/* Only publish the device once construction has succeeded. */
	*pdevice = &tdev->device;
	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
#else
int
nvkm_device_tegra_new(struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif