nv50.c

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>
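
/*
 * The NV50-generation BAR subdev manages two PCI BARs: BAR1, through which
 * userspace mappings of GPU memory (such as the framebuffer) are made, and
 * BAR3, which the driver itself uses to reach objects in VRAM.  Each BAR is
 * backed by its own GPU virtual address space, built in nv50_bar_oneinit()
 * below.  Register-level behaviour on this hardware is reverse-engineered,
 * so the comments that follow describe apparent, not vendor-documented,
 * semantics.
 */

/* Return the BAR3 address space, used for the driver's own mappings. */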
struct nvkm_vm *
nv50_bar_kmap(struct nvkm_bar *base)
{
        return nv50_bar(base)->bar3_vm;
}
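
/*
 * Carve a virtual address range of the given size out of the BAR1 address
 * space, so that an object can later be mapped where userspace can see it.
 */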
int
nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
{
        struct nv50_bar *bar = nv50_bar(base);
        return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
}
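
/*
 * Flush pending VM updates.  Writing bit 0 of 0x00330c appears to kick the
 * flush, and bit 1 reads back as set while it is still in progress; the
 * poll gives up after 2000ms.  The register semantics here are
 * reverse-engineered.
 */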
static void
nv50_bar_flush(struct nvkm_bar *base)
{
        struct nv50_bar *bar = nv50_bar(base);
        struct nvkm_device *device = bar->base.subdev.device;
        unsigned long flags;

        spin_lock_irqsave(&bar->base.lock, flags);
        nvkm_wr32(device, 0x00330c, 0x00000001);
        nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
                        break;
        );
        spin_unlock_irqrestore(&bar->base.lock, flags);
}
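
/*
 * One-time setup: build the instance-memory block backing both BAR
 * apertures.  bar->mem is a 128KiB (0x20000) container; bar->pad reserves
 * space up to pgd_addr, the offset at which this generation apparently
 * expects to find the page directory (bar->pgd, 16KiB); the two 24-byte
 * objects written below look like DMA objects describing each aperture.
 * BAR3 is set up first, and its page tables pre-populated, so the driver
 * can map objects through it while the rest of initialisation is still
 * running.
 */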
int
nv50_bar_oneinit(struct nvkm_bar *base)
{
        struct nv50_bar *bar = nv50_bar(base);
        struct nvkm_device *device = bar->base.subdev.device;
        static struct lock_class_key bar1_lock;
        static struct lock_class_key bar3_lock;
        struct nvkm_vm *vm;
        u64 start, limit;
        int ret;

        ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
                              &bar->pad);
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
        if (ret)
                return ret;

        /* BAR3 */
        start = 0x0100000000ULL;
        limit = start + device->func->resource_size(device, 3);

        ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock,
                          &vm);
        if (ret)
                return ret;

        atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
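
        /*
         * Pre-allocate page tables covering the whole BAR3 aperture;
         * nvkm_vm_boot() presumably exists for exactly this bootstrap
         * case.  Note the "limit--" turns the exclusive end of the range
         * into the inclusive limit the object written below wants.
         */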
        ret = nvkm_vm_boot(vm, limit-- - start);
        if (ret)
                return ret;

        ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
        nvkm_vm_ref(NULL, &vm, NULL);
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
        if (ret)
                return ret;
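
        /*
         * Fill in the 24-byte object describing the BAR3 aperture.  From
         * the layout below: word 0x04 holds the low 32 bits of the
         * inclusive limit, 0x08 the low 32 bits of the base, and 0x0c
         * packs the high bits of both (limit in 31:24, base in 7:0).  The
         * 0x7fc00000 flags word is a reverse-engineered constant whose
         * exact meaning is not documented here.
         */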
        nvkm_kmap(bar->bar3);
        nvkm_wo32(bar->bar3, 0x00, 0x7fc00000);
        nvkm_wo32(bar->bar3, 0x04, lower_32_bits(limit));
        nvkm_wo32(bar->bar3, 0x08, lower_32_bits(start));
        nvkm_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
                                   upper_32_bits(start));
        nvkm_wo32(bar->bar3, 0x10, 0x00000000);
        nvkm_wo32(bar->bar3, 0x14, 0x00000000);
        nvkm_done(bar->bar3);

        /*
         * BAR1: built the same way as BAR3 above, except that this address
         * space starts at 0 and its page tables are not pre-populated;
         * they are filled on demand as nv50_bar_umap() hands out ranges.
         */
        start = 0x0000000000ULL;
        limit = start + device->func->resource_size(device, 1);

        ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock,
                          &vm);
        if (ret)
                return ret;

        atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);

        ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
        nvkm_vm_ref(NULL, &vm, NULL);
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
        if (ret)
                return ret;

        nvkm_kmap(bar->bar1);
        nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
        nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
        nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
        nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
                                   upper_32_bits(start));
        nvkm_wo32(bar->bar1, 0x10, 0x00000000);
        nvkm_wo32(bar->bar1, 0x14, 0x00000000);
        nvkm_done(bar->bar1);
        return 0;
}
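
/*
 * Hardware init: reset the subunit, point the hardware at the instance
 * block built above, and activate both BAR bindings.  As elsewhere in this
 * file, the register offsets are reverse-engineered.
 */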
int
nv50_bar_init(struct nvkm_bar *base)
{
        struct nv50_bar *bar = nv50_bar(base);
        struct nvkm_device *device = bar->base.subdev.device;
        int i;
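
        /*
         * Pulse the unit's enable bit in the master control register
         * (0x000200), then write 0x100c80 and wait for bit 0 to clear;
         * this looks like a VM/TLB flush that must complete before the
         * BARs can be reprogrammed.
         */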
        nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
        nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
        nvkm_wr32(device, 0x100c80, 0x00060001);
        if (nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
                        break;
        ) < 0)
                return -EBUSY;
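
        /*
         * 0x001704 takes the instance block's address in 4KiB units
         * (hence the >> 12); it is written once plain and once with bit 30
         * set, presumably to latch the value.  0x001708/0x00170c take the
         * offsets of the BAR1/BAR3 objects within that block in 16-byte
         * units (>> 4), with bit 31 apparently marking the binding valid.
         */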
        nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
        nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
        nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
        nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
        for (i = 0; i < 8; i++)
                nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
        return 0;
}
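
/*
 * Teardown, in roughly the reverse order of nv50_bar_oneinit().  BAR3's
 * bootstrap page table, created by nvkm_vm_boot(), is released explicitly
 * before the address space itself is dropped.
 */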
void *
nv50_bar_dtor(struct nvkm_bar *base)
{
        struct nv50_bar *bar = nv50_bar(base);

        nvkm_gpuobj_del(&bar->bar1);
        nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
        nvkm_gpuobj_del(&bar->bar3);
        if (bar->bar3_vm) {
                nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
                nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
        }
        nvkm_gpuobj_del(&bar->pgd);
        nvkm_gpuobj_del(&bar->pad);
        nvkm_gpuobj_del(&bar->mem);
        return bar;
}
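
/*
 * Shared constructor, apparently intended for reuse by later chips in this
 * family: the offset of the page directory within the instance block
 * varies by generation, hence the pgd_addr parameter.
 */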
int
nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
              int index, u32 pgd_addr, struct nvkm_bar **pbar)
{
        struct nv50_bar *bar;

        if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_bar_ctor(func, device, index, &bar->base);
        bar->pgd_addr = pgd_addr;
        *pbar = &bar->base;
        return 0;
}
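
/* Hook table wiring the functions above into the common bar interface. */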
static const struct nvkm_bar_func
nv50_bar_func = {
        .dtor = nv50_bar_dtor,
        .oneinit = nv50_bar_oneinit,
        .init = nv50_bar_init,
        .kmap = nv50_bar_kmap,
        .umap = nv50_bar_umap,
        .flush = nv50_bar_flush,
};
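
/* On NV50 itself, the page directory lives at offset 0x1400. */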
int
nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
{
        return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
}