tegra.c

/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
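
/*
 * Platform (Tegra) glue for the NVKM device core: GPU power sequencing,
 * optional IOMMU setup, MMIO resource and interrupt plumbing, and the
 * device constructor.
 */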
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

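/*
 * Bring the GPU power partition up: enable the VDD regulator and all
 * clocks, then unclamp the 3D partition and release the GPU reset.
 * On failure, everything enabled so far is unwound in reverse order.
 */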
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	ret = regulator_enable(tdev->vdd);
	if (ret)
		goto err_power;

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	if (tdev->clk_ref) {
		ret = clk_prepare_enable(tdev->clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	reset_control_assert(tdev->rst);
	udelay(10);

	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
	if (ret)
		goto err_clamp;
	udelay(10);

	reset_control_deassert(tdev->rst);
	udelay(10);

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	regulator_disable(tdev->vdd);
err_power:
	return ret;
}

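/*
 * Power the partition back down: assert reset, gate the clocks, and
 * finally cut the regulator, mirroring the power-up sequence.
 */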
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	reset_control_assert(tdev->rst);
	udelay(10);

	clk_disable_unprepare(tdev->clk_pwr);
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	return regulator_disable(tdev->vdd);
}

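/*
 * If the system has an IOMMU, attach the GPU to it and set up an address
 * allocator (nvkm_mm) spanning the GPU-addressable range defined by
 * func->iommu_bit. Failure here is not fatal: the domain is cleared and
 * the device falls back to operating without an IOMMU.
 */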
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;
		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * than or equal to the system's PAGE_SIZE, with a preference
		 * for both being equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);
free_domain:
	iommu_domain_free(tdev->iommu.domain);
error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

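/* Tear down the IOMMU state created by nvkm_device_tegra_probe_iommu(). */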
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}

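/* Recover the containing nvkm_device_tegra from the embedded nvkm_device. */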
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}

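/*
 * Map NVKM's BAR indices onto the platform device's MEM resources, so
 * the common code can query register apertures the same way it does for
 * PCI devices.
 */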
static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}

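/*
 * Top-level interrupt handler: mask (unarm) the interrupt, dispatch it
 * through the MC subdev, then unmask (rearm). IRQ_NONE is returned when
 * no subdev claimed the interrupt, since the line is requested shared.
 */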
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_device *device = &tdev->device;
	bool handled = false;

	nvkm_mc_intr_unarm(device);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

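/* Suspend/teardown hook: release the interrupt if we own it. */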
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
}

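/* Resume/init hook: look up and request the GPU's "stall" interrupt. */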
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}

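/* Destructor: power the GPU down and detach from the IOMMU. */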
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}

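/* Hooks the Tegra back-end into the bus-agnostic NVKM device core. */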
static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};

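/*
 * Constructor: acquire the regulator, reset, and clock handles from the
 * platform device, set the DMA mask from func->iommu_bit, probe the
 * IOMMU, power the GPU up, and finally construct the common NVKM device.
 */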
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(tdev->vdd)) {
		ret = PTR_ERR(tdev->vdd);
		goto free;
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

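	/*
	 * tdev->clk_ref stays NULL (zeroed by kzalloc) when no reference
	 * clock is required; IS_ERR(NULL) is false, so the error check
	 * below is safe in either case.
	 */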
	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable
	 * space. This will be refined in nouveau_ttm_init(), but we need
	 * to do it early for instmem to behave properly.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;

	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;

	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}

#else
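/* Stub returned when the nouveau platform driver is not configured. */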
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif
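
/*
 * A minimal sketch (not part of this file) of how a platform driver's
 * probe routine might call nvkm_device_tegra_new(). The function-table
 * contents and the example_func/example_probe names are assumptions
 * made for illustration; only the constructor's signature and the
 * iommu_bit/require_ref_clk fields come from the code above.
 */
#if 0
static const struct nvkm_device_tegra_func example_func = {
	.iommu_bit = 34,	/* assumed: 2^34-byte GPU-addressable range */
	/* .require_ref_clk would be set on SoCs whose GPU needs "ref" */
};

static int
example_probe(struct platform_device *pdev)
{
	struct nvkm_device *device;
	int ret;

	/* detect=true, mmio=true, all subdevices enabled */
	ret = nvkm_device_tegra_new(&example_func, pdev, NULL, NULL,
				    true, true, ~0ULL, &device);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, device);
	return 0;
}
#endif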