tegra.c

/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
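
/*
 * Power the GPU rail and clocks up: enable the VDD regulator (when the
 * board requires one), then the core "gpu" clock, the optional "ref"
 * clock and the "pwr" clock (pinned to 204 MHz), pulse the reset line,
 * and remove the 3D power-gate clamps before releasing reset.  Error
 * paths unwind in reverse order.
 */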
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	if (tdev->vdd) {
		ret = regulator_enable(tdev->vdd);
		if (ret)
			goto err_power;
	}

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	if (tdev->clk_ref) {
		ret = clk_prepare_enable(tdev->clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	reset_control_assert(tdev->rst);
	udelay(10);

	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
	if (ret)
		goto err_clamp;
	udelay(10);

	reset_control_deassert(tdev->rst);
	udelay(10);

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	if (tdev->vdd)
		regulator_disable(tdev->vdd);
err_power:
	return ret;
}
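
/*
 * Reverse of nvkm_device_tegra_power_up(): assert reset, gate the
 * clocks, and finally cut the VDD regulator if one is in use.
 */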
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	int ret;

	reset_control_assert(tdev->rst);
	udelay(10);

	clk_disable_unprepare(tdev->clk_pwr);
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	if (tdev->vdd) {
		ret = regulator_disable(tdev->vdd);
		if (ret)
			return ret;
	}

	return 0;
}
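
/*
 * Optionally set up an IOMMU domain for the GPU.  If the platform bus
 * has no IOMMU, or the device declares no iommu_bit, tdev->iommu.domain
 * stays NULL and the rest of the driver operates without one.  On
 * success an nvkm_mm allocator is initialized to hand out IOVA ranges
 * within the (1 << iommu_bit) aperture the GPU can address.
 */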
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * than or equal to the system's PAGE_SIZE, with a preference
		 * for PAGE_SIZE itself.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
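
/*
 * Tear down whatever nvkm_device_tegra_probe_iommu() set up: free the
 * IOVA allocator, detach the device and free the domain.
 */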
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}
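
/* Recover the containing nvkm_device_tegra from an embedded nvkm_device. */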
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}
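
/*
 * Resource helpers: on Tegra the GPU "BARs" are memory regions of the
 * platform device, looked up via platform_get_resource().
 */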
static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}
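
/*
 * Primary interrupt handler: disarm the master interrupt controller,
 * dispatch to the subdev handlers, then rearm.  Returning IRQ_NONE when
 * nothing was handled matters because the line is requested as shared.
 */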
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_device *device = &tdev->device;
	bool handled = false;

	nvkm_mc_intr_unarm(device);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
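
/* Release the IRQ on fini/suspend; init will request it again. */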
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);

	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
}
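
/* Look up and request the GPU's "stall" interrupt. */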
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}
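
/* Power the GPU down and drop the IOMMU state before the device is freed. */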
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}
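
/* Hooks connecting the common nvkm device core to this Tegra backend. */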
static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};
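
/*
 * Probe-time constructor: acquire the regulator, reset line and clocks,
 * set the DMA mask, probe the IOMMU, power the GPU up, read the speedo
 * values from the Tegra SKU fuses, then construct the common nvkm
 * device.  Each failure path unwinds exactly the steps already taken.
 */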
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	if (func->require_vdd) {
		tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(tdev->vdd)) {
			ret = PTR_ERR(tdev->vdd);
			goto free;
		}
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}
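
	/*
	 * The reference clock is optional.  When it is not required,
	 * tdev->clk_ref stays NULL (tdev is zero-allocated) and
	 * IS_ERR(NULL) below is false, so the error check is a no-op.
	 */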
	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable
	 * space.  This will be refined in nouveau_ttm_init(), but we need
	 * to do it early for instmem to behave properly.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;
	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
#else
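
/*
 * Stub used when the kernel is built without
 * CONFIG_NOUVEAU_PLATFORM_DRIVER: the Tegra path is unavailable.
 */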
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif